From b7d9a717c0fe848e33e959f8ca2235715770d3ed Mon Sep 17 00:00:00 2001 From: chrishalcrow <57948917+chrishalcrow@users.noreply.github.com> Date: Fri, 31 May 2024 14:19:08 +0100 Subject: [PATCH 1/5] standardize colons spacing in docstrings --- src/spikeinterface/comparison/collision.py | 2 +- src/spikeinterface/comparison/hybrid.py | 36 +++--- .../comparison/multicomparisons.py | 40 +++--- .../comparison/paircomparisons.py | 102 +++++++-------- src/spikeinterface/core/baserecording.py | 64 +++++----- src/spikeinterface/core/basesnippets.py | 16 +-- src/spikeinterface/core/basesorting.py | 28 ++--- src/spikeinterface/core/binaryfolder.py | 4 +- .../core/binaryrecordingextractor.py | 34 ++--- src/spikeinterface/core/job_tools.py | 60 ++++----- src/spikeinterface/core/npyfoldersnippets.py | 4 +- src/spikeinterface/core/numpyextractors.py | 58 ++++----- src/spikeinterface/core/recording_tools.py | 102 +++++++-------- src/spikeinterface/core/sortinganalyzer.py | 62 ++++----- src/spikeinterface/core/sortingfolder.py | 4 +- src/spikeinterface/core/sparsity.py | 54 ++++---- src/spikeinterface/core/template_tools.py | 60 ++++----- src/spikeinterface/core/zarrextractors.py | 48 +++---- src/spikeinterface/curation/auto_merge.py | 70 +++++------ .../curation/curationsorting.py | 26 ++-- .../curation/mergeunitssorting.py | 12 +- .../curation/remove_duplicated_spikes.py | 8 +- .../curation/remove_excess_spikes.py | 12 +- .../curation/remove_redundant.py | 26 ++-- .../curation/splitunitsorting.py | 12 +- src/spikeinterface/exporters/report.py | 14 +-- src/spikeinterface/exporters/to_phy.py | 24 ++-- src/spikeinterface/extractors/cbin_ibl.py | 6 +- .../extractors/herdingspikesextractors.py | 2 +- .../extractors/klustaextractors.py | 2 +- .../extractors/mdaextractors.py | 22 ++-- .../extractors/neoextractors/alphaomega.py | 10 +- .../extractors/neoextractors/axona.py | 4 +- .../extractors/neoextractors/biocam.py | 12 +- .../extractors/neoextractors/blackrock.py | 16 +-- .../extractors/neoextractors/ced.py | 10 +- .../extractors/neoextractors/intan.py | 8 +- .../extractors/neoextractors/maxwell.py | 12 +- .../extractors/neoextractors/mcsraw.py | 10 +- .../extractors/neoextractors/mearec.py | 10 +- .../extractors/neoextractors/neuralynx.py | 20 +-- .../extractors/neoextractors/neuroexplorer.py | 8 +- .../extractors/neoextractors/neuroscope.py | 26 ++-- .../extractors/neoextractors/nix.py | 10 +- .../extractors/neoextractors/openephys.py | 56 ++++----- .../extractors/neoextractors/plexon.py | 10 +- .../extractors/neoextractors/plexon2.py | 14 +-- .../extractors/neoextractors/spike2.py | 8 +- .../extractors/neoextractors/spikegadgets.py | 8 +- .../extractors/neoextractors/spikeglx.py | 10 +- .../extractors/neoextractors/tdt.py | 8 +- .../extractors/nwbextractors.py | 74 +++++------ .../extractors/phykilosortextractors.py | 22 ++-- src/spikeinterface/extractors/toy_example.py | 24 ++-- src/spikeinterface/generation/drift_tools.py | 62 ++++----- .../postprocessing/alignsorting.py | 6 +- src/spikeinterface/preprocessing/clip.py | 26 ++-- .../preprocessing/common_reference.py | 18 +-- .../preprocessing/correct_lsb.py | 10 +- .../preprocessing/depth_order.py | 4 +- .../preprocessing/detect_bad_channels.py | 54 ++++---- src/spikeinterface/preprocessing/filter.py | 56 ++++----- .../preprocessing/filter_gaussian.py | 10 +- .../preprocessing/interpolate_bad_channels.py | 10 +- src/spikeinterface/preprocessing/motion.py | 24 ++-- .../preprocessing/normalize_scale.py | 48 +++---- .../preprocessing/phase_shift.py | 8 +- 
.../preprocessing/remove_artifacts.py | 28 ++--- src/spikeinterface/preprocessing/resample.py | 8 +- .../preprocessing/silence_periods.py | 12 +- .../preprocessing/unsigned_to_signed.py | 4 +- src/spikeinterface/preprocessing/whiten.py | 24 ++-- src/spikeinterface/sorters/basesorter.py | 2 +- src/spikeinterface/sorters/launcher.py | 32 ++--- src/spikeinterface/sorters/runsorter.py | 118 +++++++++--------- src/spikeinterface/sorters/sorterlist.py | 12 +- .../widgets/all_amplitudes_distributions.py | 8 +- src/spikeinterface/widgets/amplitudes.py | 14 +-- src/spikeinterface/widgets/collision.py | 32 ++--- src/spikeinterface/widgets/comparison.py | 14 +-- .../widgets/crosscorrelograms.py | 12 +- src/spikeinterface/widgets/gtstudy.py | 26 ++-- .../widgets/isi_distribution.py | 8 +- src/spikeinterface/widgets/motion.py | 20 +-- src/spikeinterface/widgets/multicomparison.py | 32 ++--- src/spikeinterface/widgets/peak_activity.py | 14 +-- src/spikeinterface/widgets/probe_map.py | 8 +- src/spikeinterface/widgets/quality_metrics.py | 10 +- src/spikeinterface/widgets/rasters.py | 10 +- src/spikeinterface/widgets/sorting_summary.py | 14 +-- src/spikeinterface/widgets/spike_locations.py | 18 +-- .../widgets/spikes_on_traces.py | 34 ++--- .../widgets/template_metrics.py | 10 +- .../widgets/template_similarity.py | 12 +- src/spikeinterface/widgets/traces.py | 36 +++--- src/spikeinterface/widgets/unit_depths.py | 6 +- src/spikeinterface/widgets/unit_locations.py | 14 +-- src/spikeinterface/widgets/unit_presence.py | 10 +- src/spikeinterface/widgets/unit_probe_map.py | 10 +- src/spikeinterface/widgets/unit_summary.py | 4 +- src/spikeinterface/widgets/unit_waveforms.py | 48 +++---- .../widgets/unit_waveforms_density_map.py | 14 +-- 102 files changed, 1209 insertions(+), 1209 deletions(-) diff --git a/src/spikeinterface/comparison/collision.py b/src/spikeinterface/comparison/collision.py index 8d750646a0..9b455e6200 100644 --- a/src/spikeinterface/comparison/collision.py +++ b/src/spikeinterface/comparison/collision.py @@ -14,7 +14,7 @@ class CollisionGTComparison(GroundTruthComparison): This class needs maintenance and need a bit of refactoring. - collision_lag: float + collision_lag : float Collision lag in ms. """ diff --git a/src/spikeinterface/comparison/hybrid.py b/src/spikeinterface/comparison/hybrid.py index 75812bad17..beb9682e37 100644 --- a/src/spikeinterface/comparison/hybrid.py +++ b/src/spikeinterface/comparison/hybrid.py @@ -26,32 +26,32 @@ class HybridUnitsRecording(InjectTemplatesRecording): Parameters ---------- - parent_recording: BaseRecording + parent_recording : BaseRecording Existing recording to add on top of. - templates: np.ndarray[n_units, n_samples, n_channels] + templates : np.ndarray[n_units, n_samples, n_channels] Array containing the templates to inject for all the units. - injected_sorting: BaseSorting | None: + injected_sorting : BaseSorting | None: The sorting for the injected units. If None, will be generated using the following parameters. - nbefore: list[int] | int | None + nbefore : list[int] | int | None Where is the center of the template for each unit? If None, will default to the highest peak. - firing_rate: float + firing_rate : float The firing rate of the injected units (in Hz). - amplitude_factor: np.ndarray | None: + amplitude_factor : np.ndarray | None: The amplitude factor for each spike. If None, will be generated as a gaussian centered at 1.0 and with an std of amplitude_std. 
- amplitude_std: float + amplitude_std : float The standard deviation of the amplitude (centered at 1.0). - refractory_period_ms: float + refractory_period_ms : float The refractory period of the injected spike train (in ms). - injected_sorting_folder: str | Path | None + injected_sorting_folder : str | Path | None If given, the injected sorting is saved to this folder. It must be specified if injected_sorting is None or not serializable to file. Returns ------- - hybrid_units_recording: HybridUnitsRecording + hybrid_units_recording : HybridUnitsRecording The recording containing real and hybrid units. """ @@ -128,29 +128,29 @@ class HybridSpikesRecording(InjectTemplatesRecording): Parameters ---------- - wvf_extractor: WaveformExtractor + wvf_extractor : WaveformExtractor The waveform extractor object of the existing recording. - injected_sorting: BaseSorting | None + injected_sorting : BaseSorting | None Additional spikes to inject. If None, they will be generated. - max_injected_per_unit: int + max_injected_per_unit : int If injected_sorting=None, the max number of spikes per unit that is allowed to be injected. - unit_ids: list[int] | None + unit_ids : list[int] | None unit_ids to take from the wvf_extractor for spike injection. - injected_rate: float + injected_rate : float If injected_sorting=None, the max fraction of spikes per unit that is allowed to be injected. - refractory_period_ms: float + refractory_period_ms : float If injected_sorting=None, the injected spikes need to respect this refractory period. - injected_sorting_folder: str | Path | None + injected_sorting_folder : str | Path | None If given, the injected sorting is saved to this folder. It must be specified if injected_sorting is None or not serializable to file. Returns ------- - hybrid_spikes_recording: HybridSpikesRecording: + hybrid_spikes_recording : HybridSpikesRecording The recording containing units with real and hybrid spikes. """ diff --git a/src/spikeinterface/comparison/multicomparisons.py b/src/spikeinterface/comparison/multicomparisons.py index 77adcaa8ca..f6db1ab7a5 100644 --- a/src/spikeinterface/comparison/multicomparisons.py +++ b/src/spikeinterface/comparison/multicomparisons.py @@ -25,29 +25,29 @@ class MultiSortingComparison(BaseMultiComparison, MixinSpikeTrainComparison): Parameters ---------- - sorting_list: list + sorting_list : list List of sorting extractor objects to be compared - name_list: list, default: None + name_list : list, default : None List of spike sorter names. If not given, sorters are named as "sorter0", "sorter1", "sorter2", etc. - delta_time: float, default: 0.4 + delta_time : float, default : 0.4 Number of ms to consider coincident spikes - match_score: float, default: 0.5 + match_score : float, default : 0.5 Minimum agreement score to match units - chance_score: float, default: 0.1 + chance_score : float, default : 0.1 Minimum agreement score for a possible match - n_jobs: int, default: -1 + n_jobs : int, default : -1 Number of cores to use in parallel.
Uses all available if -1 - spiketrain_mode: "union" | "intersection", default: "union" + spiketrain_mode : "union" | "intersection", default : "union" Mode to extract agreement spike trains: - - "union": spike trains are the union between the spike trains of the best matching two sorters - - "intersection": spike trains are the intersection between the spike trains of the + - "union" : spike trains are the union between the spike trains of the best matching two sorters + - "intersection" : spike trains are the intersection between the spike trains of the best matching two sorters - verbose: bool, default: False + verbose : bool, default : False if True, output is verbose Returns ------- - multi_sorting_comparison: MultiSortingComparison + multi_sorting_comparison : MultiSortingComparison MultiSortingComparison object with the multiple sorter comparison """ @@ -162,15 +162,15 @@ def get_agreement_sorting(self, minimum_agreement_count=1, minimum_agreement_cou Parameters ---------- - minimum_agreement_count: int + minimum_agreement_count : int Minimum number of matches among sorters to include a unit. - minimum_agreement_count_only: bool + minimum_agreement_count_only : bool If True, only units with agreement == "minimum_matching" are included. If False, units with an agreement >= "minimum_matching" are included Returns ------- - agreement_sorting: AgreementSortingExtractor + agreement_sorting : AgreementSortingExtractor The output AgreementSortingExtractor """ assert minimum_agreement_count > 0, "'minimum_agreement_count' should be greater than 0" @@ -309,20 +309,20 @@ class MultiTemplateComparison(BaseMultiComparison, MixinTemplateComparison): Parameters ---------- - waveform_list: list + waveform_list : list List of waveform extractor objects to be compared - name_list: list, default: None + name_list : list, default : None List of session names. If not given, sorters are named as "sess0", "sess1", "sess2", etc. 
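A minimal usage sketch of the multi-sorting comparison API documented above, assuming `compare_multiple_sorters` is imported from spikeinterface.comparison and that `sorting_A`, `sorting_B` and `sorting_C` are existing sorting objects (hypothetical names):

    from spikeinterface.comparison import compare_multiple_sorters

    msc = compare_multiple_sorters(
        sorting_list=[sorting_A, sorting_B, sorting_C],
        name_list=["sorterA", "sorterB", "sorterC"],
    )
    # keep only units matched by at least two sorters
    agreement_sorting = msc.get_agreement_sorting(minimum_agreement_count=2)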
- match_score: float, default: 0.8 + match_score : float, default : 0.8 Minimum agreement score to match units - chance_score: float, default: 0.3 + chance_score : float, default : 0.3 Minimum agreement score for a possible match - verbose: bool, default: False + verbose : bool, default : False If True, output is verbose Returns ------- - multi_template_comparison: MultiTemplateComparison + multi_template_comparison : MultiTemplateComparison MultiTemplateComparison object with the multiple template comparisons """ diff --git a/src/spikeinterface/comparison/paircomparisons.py b/src/spikeinterface/comparison/paircomparisons.py index 50c3ee4071..fac711cda0 100644 --- a/src/spikeinterface/comparison/paircomparisons.py +++ b/src/spikeinterface/comparison/paircomparisons.py @@ -112,28 +112,28 @@ class SymmetricSortingComparison(BasePairSorterComparison): Parameters ---------- - sorting1: SortingExtractor + sorting1 : SortingExtractor The first sorting for the comparison - sorting2: SortingExtractor + sorting2 : SortingExtractor The second sorting for the comparison - sorting1_name: str, default: None + sorting1_name : str, default : None The name of sorter 1 - sorting2_name: : str, default: None + sorting2_name : str, default : None The name of sorter 2 - delta_time: float, default: 0.4 + delta_time : float, default : 0.4 Number of ms to consider coincident spikes - match_score: float, default: 0.5 + match_score : float, default : 0.5 Minimum agreement score to match units - chance_score: float, default: 0.1 + chance_score : float, default : 0.1 Minimum agreement score for a possible match - n_jobs: int, default: -1 + n_jobs : int, default : -1 Number of cores to use in parallel. Uses all available if -1 - verbose: bool, default: False + verbose : bool, default : False If True, output is verbose Returns ------- - sorting_comparison: SortingComparison + sorting_comparison : SortingComparison The SortingComparison object """ @@ -215,44 +215,44 @@ class GroundTruthComparison(BasePairSorterComparison): Parameters ---------- - gt_sorting: SortingExtractor + gt_sorting : SortingExtractor The ground-truth sorting for the comparison - tested_sorting: SortingExtractor + tested_sorting : SortingExtractor The tested sorting for the comparison - gt_name: str, default: None + gt_name : str, default : None The name of the ground-truth sorting - tested_name: : str, default: None + tested_name : str, default : None The name of the tested sorting - delta_time: float, default: 0.4 + delta_time : float, default : 0.4 Number of ms to consider coincident spikes - match_score: float, default: 0.5 + match_score : float, default : 0.5 Minimum agreement score to match units - chance_score: float, default: 0.1 + chance_score : float, default : 0.1 Minimum agreement score for a possible match - redundant_score: float, default: 0.2 + redundant_score : float, default : 0.2 Agreement score above which units are redundant - overmerged_score: float, default: 0.2 + overmerged_score : float, default : 0.2 Agreement score above which units can be overmerged - well_detected_score: float, default: 0.8 + well_detected_score : float, default : 0.8 Agreement score above which units are well detected - exhaustive_gt: bool, default: False + exhaustive_gt : bool, default : False Tells whether the ground truth is "exhaustive" or not. In other words, whether the GT contains all possible units. It allows more performance measurements.
For instance, MEArec simulated datasets have exhaustive_gt=True - match_mode: "hungarian" | "best", default: "hungarian" + match_mode : "hungarian" | "best", default : "hungarian" The method to match units - n_jobs: int, default: -1 + n_jobs : int, default : -1 Number of cores to use in parallel. Uses all available if -1 - compute_labels: bool, default: False + compute_labels : bool, default : False If True, labels are computed at instantiation - compute_misclassifications: bool, default: False + compute_misclassifications : bool, default : False If True, misclassifications are computed at instantiation - verbose: bool, default: False + verbose : bool, default : False If True, output is verbose Returns ------- - sorting_comparison: SortingComparison + sorting_comparison : SortingComparison The SortingComparison object """ @@ -366,7 +366,7 @@ def get_confusion_matrix(self): Returns ------- - confusion_matrix: pandas.DataFrame + confusion_matrix : pandas.DataFrame The confusion matrix """ if self._confusion_matrix is None: @@ -392,14 +392,14 @@ def get_performance(self, method="by_unit", output="pandas"): Parameters ---------- - method: "by_unit" | "pooled_with_average", default: "by_unit" + method : "by_unit" | "pooled_with_average", default : "by_unit" The method to compute performance - output: "pandas" | "dict", default: "pandas" + output : "pandas" | "dict", default : "pandas" The output format Returns ------- - perf: pandas dataframe/series (or dict) + perf : pandas dataframe/series (or dict) dataframe/series (based on "output") with performance entries """ import pandas as pd @@ -478,7 +478,7 @@ def get_well_detected_units(self, well_detected_score=None): Parameters ---------- - well_detected_score: float, default: None + well_detected_score : float, default : None The agreement score above which tested units are counted as "well detected". """ @@ -514,7 +514,7 @@ def get_false_positive_units(self, redundant_score=None): Parameters ---------- - redundant_score: float, default: None + redundant_score : float, default : None The agreement score below which tested units are counted as "false positive" (and not "redundant"). """ @@ -554,7 +554,7 @@ def get_redundant_units(self, redundant_score=None): Parameters ---------- - redundant_score=None: float, default: None + redundant_score : float, default : None The agreement score above which tested units are counted as "redundant" (and not "false positive"). """ @@ -589,7 +589,7 @@ def get_overmerged_units(self, overmerged_score=None): Parameters ---------- - overmerged_score: float, default: None + overmerged_score : float, default : None Tested units with 2 or more agreement scores above "overmerged_score" are counted as "overmerged". """ @@ -664,24 +664,24 @@ def count_units_categories( _template_txt_performance = """PERFORMANCE ({method}) ----------- -ACCURACY: {accuracy} -RECALL: {recall} -PRECISION: {precision} -FALSE DISCOVERY RATE: {false_discovery_rate} -MISS RATE: {miss_rate} +ACCURACY : {accuracy} +RECALL : {recall} +PRECISION : {precision} +FALSE DISCOVERY RATE : {false_discovery_rate} +MISS RATE : {miss_rate} """ _template_summary_part1 = """SUMMARY ------- -GT num_units: {num_gt} -TESTED num_units: {num_tested} -num_well_detected: {num_well_detected} -num_redundant: {num_redundant} -num_overmerged: {num_overmerged} +GT num_units : {num_gt} +TESTED num_units : {num_tested} +num_well_detected : {num_well_detected} +num_redundant : {num_redundant} +num_overmerged : {num_overmerged} """ _template_summary_part2 = """num_false_positive_units {num_false_positive_units} -num_bad: {num_bad} +num_bad : {num_bad} """ @@ -700,15 +700,15 @@ class TemplateComparison(BasePairComparison, MixinTemplateComparison): The first SortingAnalyzer to get templates to compare sorting_analyzer_2 : SortingAnalyzer The second SortingAnalyzer to get templates to compare - unit_ids1 : list, default: None + unit_ids1 : list, default : None List of units from sorting_analyzer_1 to compare - unit_ids2 : list, default: None + unit_ids2 : list, default : None List of units from sorting_analyzer_2 to compare - similarity_method : str, default: "cosine_similarity" + similarity_method : str, default : "cosine_similarity" Method for the similarity matrix - sparsity_dict : dict, default: None + sparsity_dict : dict, default : None Dictionary for sparsity - verbose : bool, default: False + verbose : bool, default : False If True, output is verbose Returns diff --git a/src/spikeinterface/core/baserecording.py b/src/spikeinterface/core/baserecording.py index 53411a5d19..78a8e8c84a 100644 --- a/src/spikeinterface/core/baserecording.py +++ b/src/spikeinterface/core/baserecording.py @@ -188,9 +188,9 @@ def get_num_samples(self, segment_index=None) -> int: Parameters ---------- - segment_index : int or None, default: None + segment_index : int or None, default : None The segment index to retrieve the number of samples for. - For multi-segment objects, it is required, default: None + For multi-segment objects, it is required With single segment recording returns the number of samples in the segment Returns ------- @@ -223,9 +223,9 @@ def get_duration(self, segment_index=None) -> float: Parameters ---------- - segment_index : int or None, default: None + segment_index : int or None, default : None The segment index to retrieve the duration for. - For multi-segment objects, it is required, default: None + For multi-segment objects, it is required With single segment recording returns the duration of the single segment Returns ------- @@ -256,9 +256,9 @@ def get_memory_size(self, segment_index=None) -> int: Parameters ---------- - segment_index : int or None, default: None + segment_index : int or None, default : None The index of the segment for which the memory size should be calculated. - For multi-segment objects, it is required, default: None + For multi-segment objects, it is required With single segment recording returns the memory size of the single segment Returns ------- @@ -301,20 +301,20 @@ def get_traces( Parameters ---------- - segment_index : int | None, default: None - The segment index to get traces from.
If recording is multi-segment, it is required, default: None - start_frame : int | None, default: None - The start frame. If None, 0 is used, default: None - end_frame : int | None, default: None - The end frame. If None, the number of samples in the segment is used, default: None - channel_ids : list | np.array | tuple | None, default: None - The channel ids. If None, all channels are used, default: None - order : "C" | "F" | None, default: None + segment_index : int | None, default : None + The segment index to get traces from. If recording is multi-segment, it is required, default : None + start_frame : int | None, default : None + The start frame. If None, 0 is used, default : None + end_frame : int | None, default : None + The end frame. If None, the number of samples in the segment is used, default : None + channel_ids : list | np.array | tuple | None, default : None + The channel ids. If None, all channels are used, default : None + order : "C" | "F" | None, default : None The order of the traces ("C" | "F"). If None, traces are returned as they are - return_scaled : bool, default: False + return_scaled : bool, default : False If True and the recording has scaling (gain_to_uV and offset_to_uV properties), traces are scaled to uV - cast_unsigned : bool, default: False + cast_unsigned : bool, default : False If True and the traces are unsigned, they are cast to integer and centered (an offset of (2**nbits) is subtracted) @@ -399,9 +399,9 @@ def get_time_info(self, segment_index=None) -> dict: dict A dictionary containing the following key-value pairs: - - "sampling_frequency": The sampling frequency of the RecordingSegment. - - "t_start": The start time of the RecordingSegment. - - "time_vector": The time vector of the RecordingSegment. + - "sampling_frequency" : The sampling frequency of the RecordingSegment. + - "t_start" : The start time of the RecordingSegment. + - "time_vector" : The time vector of the RecordingSegment. Notes ----- @@ -424,7 +424,7 @@ def get_times(self, segment_index=None): Parameters ---------- - segment_index : int or None, default: None + segment_index : int or None, default : None The segment index (required for multi-segment) Returns @@ -442,7 +442,7 @@ def has_time_vector(self, segment_index=None): Parameters ---------- - segment_index : int or None, default: None + segment_index : int or None, default : None The segment index (required for multi-segment) Returns @@ -462,9 +462,9 @@ def set_times(self, times, segment_index=None, with_warning=True): ---------- times : 1d np.array The time vector - segment_index : int or None, default: None + segment_index : int or None, default : None The segment index (required for multi-segment) - with_warning : bool, default: True + with_warning : bool, default : True If True, a warning is printed """ segment_index = self._check_segment_index(segment_index) @@ -772,9 +772,9 @@ def get_times_kwargs(self) -> dict: dict A dictionary containing the following key-value pairs: - - "sampling_frequency": The sampling frequency of the RecordingSegment. - - "t_start": The start time of the RecordingSegment. - - "time_vector": The time vector of the RecordingSegment. + - "sampling_frequency" : The sampling frequency of the RecordingSegment. + - "t_start" : The start time of the RecordingSegment. + - "time_vector" : The time vector of the RecordingSegment. 
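A minimal sketch of the trace- and time-access API documented in these baserecording hunks, assuming `recording` is an existing single-segment BaseRecording:

    # slice one second of traces from segment 0, scaled to uV
    traces = recording.get_traces(
        segment_index=0,
        start_frame=0,
        end_frame=30_000,
        return_scaled=True,  # requires gain_to_uV/offset_to_uV properties
    )
    # time vector (or t_start + sampling_frequency based times) for segment 0
    times = recording.get_times(segment_index=0)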
Notes ----- The dictionary can be returned for either the existing segment or a global dictionary, so the keys are the same in both cases. @@ -814,7 +814,7 @@ def get_num_samples(self) -> int: """Returns the number of samples in this signal segment Returns: - SampleIndex: Number of samples in the signal segment + SampleIndex : Number of samples in the signal segment """ # must be implemented in subclass raise NotImplementedError @@ -830,16 +830,16 @@ def get_traces( Parameters ---------- - start_frame: int | None, default: None + start_frame : int | None, default : None start sample index, or zero if None - end_frame: int | None, default: None + end_frame : int | None, default : None end_sample, or number of samples if None - channel_indices: list | np.array | tuple | None, default: None + channel_indices : list | np.array | tuple | None, default : None Indices of channels to return, or all channels if None Returns ------- - traces: np.ndarray + traces : np.ndarray Array of traces, num_samples x num_channels """ # must be implemented in subclass raise NotImplementedError diff --git a/src/spikeinterface/core/basesnippets.py b/src/spikeinterface/core/basesnippets.py index d1e0460e3c..81df759a31 100644 --- a/src/spikeinterface/core/basesnippets.py +++ b/src/spikeinterface/core/basesnippets.py @@ -235,14 +235,14 @@ def get_snippets( Parameters ---------- - indices: list[int] + indices : list[int] Indices of the snippets to return - channel_indices: Union[list, None], default: None + channel_indices : Union[list, None], default : None Indices of channels to return, or all channels if None Returns ------- - snippets: np.ndarray + snippets : np.ndarray Array of snippets, num_snippets x num_samples x num_channels """ raise NotImplementedError @@ -251,7 +251,7 @@ def get_num_snippets(self): """Returns the number of snippets in this segment Returns: - SampleIndex: Number of snippets in the segment + SampleIndex : Number of snippets in the segment """ raise NotImplementedError @@ -259,7 +259,7 @@ def get_frames(self, indices): """Returns the frames of the snippets in this segment Returns: - SampleIndex: Number of samples in the segment + SampleIndex : Frames of the snippets in the segment """ raise NotImplementedError @@ -269,14 +269,14 @@ def frames_to_indices(self, start_frame: Union[int, None] = None, end_frame: Uni Parameters ---------- - start_frame: Union[int, None], default: None + start_frame : Union[int, None], default : None start sample index, or zero if None - end_frame: Union[int, None], default: None + end_frame : Union[int, None], default : None end_sample, or number of samples if None Returns ------- - snippets: slice + snippets : slice slice of selected snippets """ raise NotImplementedError diff --git a/src/spikeinterface/core/basesorting.py b/src/spikeinterface/core/basesorting.py index fc0d5ba0d5..85f4f6fb5c 100644 --- a/src/spikeinterface/core/basesorting.py +++ b/src/spikeinterface/core/basesorting.py @@ -94,7 +94,7 @@ def get_num_samples(self, segment_index=None) -> int: Parameters ---------- - segment_index : int or None, default: None + segment_index : int or None, default : None The segment index to retrieve the number of samples for. For multi-segment objects, it is required @@ -187,7 +187,7 @@ def register_recording(self, recording, check_spike_frames=True): recording : BaseRecording Recording with the same number of segments as current sorting. Assigned to self._recording. - check_spike_frames : bool, default: True + check_spike_frames : bool, default : True If True, assert for each segment that all spikes are within the recording's range. """ assert np.isclose( @@ -320,8 +320,8 @@ def count_num_spikes_per_unit(self, outputs="dict"): Parameters ---------- - outputs: "dict" | "array", default: "dict" - Control the type of the returned object: a dict (keys are unit_ids) or an numpy array. + outputs : "dict" | "array", default : "dict" + Control the type of the returned object: a dict (keys are unit_ids) or a numpy array. Returns ------- @@ -374,7 +374,7 @@ def count_total_num_spikes(self) -> int: Returns ------- - total_num_spikes: int + total_num_spikes : int The total number of spikes """ return self.to_spike_vector().size @@ -388,7 +388,7 @@ def select_units(self, unit_ids, renamed_unit_ids=None) -> BaseSorting: ---------- unit_ids : numpy.array or list List of unit ids to keep - renamed_unit_ids : numpy.array or list, default: None + renamed_unit_ids : numpy.array or list, default : None If given, the kept unit ids are renamed Returns @@ -519,7 +519,7 @@ def precompute_spike_trains(self, from_spike_vector=None): Parameters ---------- - from_spike_vector: None | bool, default: None + from_spike_vector : None | bool, default : None If None, then it is automatic depending on whether the spike vector is cached. If True, will compute it from the spike vector. If False, will call `get_unit_spike_train` for each segment for each unit. @@ -560,20 +560,20 @@ def to_spike_vector( Parameters ---------- - concatenated: bool, default: True + concatenated : bool, default : True With concatenated=True the output is one numpy "spike vector" with spikes from all segments. With concatenated=False the output is a list of "spike vectors", one per segment. - extremum_channel_inds: None or dict, default: None + extremum_channel_inds : None or dict, default : None If a dictionary mapping unit_id to channel_ind is given, then an extra field "channel_index" is added. This can be convenient for computing spike positions after sorting. This dict can be computed with `get_template_extremum_channel(we, outputs="index")` - use_cache: bool, default: True + use_cache : bool, default : True When True, the spike vector is cached as an attribute of the object (`_cached_spike_vector`). This caching only occurs when extremum_channel_inds=None. Returns ------- - spikes: np.array + spikes : np.array Structured numpy array ("sample_index", "unit_index", "segment_index") with all spikes Or ("sample_index", "unit_index", "segment_index", "channel_index") if extremum_channel_inds is given @@ -685,7 +685,7 @@ def to_multiprocessing(self, n_jobs): Parameters ---------- - n_jobs: int + n_jobs : int The number of jobs.
Returns ------- @@ -727,8 +727,8 @@ def get_unit_spike_train( Parameters ---------- unit_id - start_frame: int, default: None - end_frame: int, default: None + start_frame : int, default : None + end_frame : int, default : None Returns ------- diff --git a/src/spikeinterface/core/binaryfolder.py b/src/spikeinterface/core/binaryfolder.py index 5e8c371abd..ec9bdfcc5e 100644 --- a/src/spikeinterface/core/binaryfolder.py +++ b/src/spikeinterface/core/binaryfolder.py @@ -17,11 +17,11 @@ class BinaryFolderRecording(BinaryRecordingExtractor): Parameters ---------- - folder_path: str or Path + folder_path : str or Path Returns ------- - recording: BinaryFolderRecording + recording : BinaryFolderRecording The recording """ diff --git a/src/spikeinterface/core/binaryrecordingextractor.py b/src/spikeinterface/core/binaryrecordingextractor.py index eaf81708ea..f2e4762c2c 100644 --- a/src/spikeinterface/core/binaryrecordingextractor.py +++ b/src/spikeinterface/core/binaryrecordingextractor.py @@ -17,29 +17,29 @@ class BinaryRecordingExtractor(BaseRecording): Parameters ---------- - file_paths: str or Path or list + file_paths : str or Path or list Path to the binary file - sampling_frequency: float + sampling_frequency : float The sampling frequency - num_channels: int + num_channels : int Number of channels - num_chan: int [deprecated, use num_channels instead, will be removed as early as v0.100.0] + num_chan : int [deprecated, use num_channels instead, will be removed as early as v0.100.0] Number of channels - dtype: str or dtype + dtype : str or dtype The dtype of the binary file - time_axis: int, default: 0 + time_axis : int, default : 0 The axis of the time dimension - t_starts: None or list of float, default: None + t_starts : None or list of float, default : None Times in seconds of the first sample for each segment - channel_ids: list, default: None + channel_ids : list, default : None A list of channel ids - file_offset: int, default: 0 + file_offset : int, default : 0 Number of bytes in the file to offset by during memmap instantiation. - gain_to_uV: float or array-like, default: None + gain_to_uV : float or array-like, default : None The gain to apply to the traces - offset_to_uV: float or array-like, default: None + offset_to_uV : float or array-like, default : None The offset to apply to the traces - is_filtered: bool or None, default: None + is_filtered : bool or None, default : None If True, the recording is assumed to be filtered. If None, is_filtered is not set. Notes @@ -48,7 +48,7 @@ class BinaryRecordingExtractor(BaseRecording): Returns ------- - recording: BinaryRecordingExtractor + recording : BinaryRecordingExtractor The recording Extractor """ @@ -137,11 +137,11 @@ def write_recording(recording, file_paths, dtype=None, **job_kwargs): Parameters ---------- - recording: RecordingExtractor + recording : RecordingExtractor The recording extractor object to be saved in .dat format - file_paths: str + file_paths : str The path to the file. 
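A minimal construction sketch for the extractor documented above; the file name and acquisition parameters are hypothetical placeholders, assuming the class is importable from spikeinterface.core:

    from spikeinterface.core import BinaryRecordingExtractor

    rec = BinaryRecordingExtractor(
        file_paths="traces.raw",       # hypothetical raw binary file
        sampling_frequency=30_000.0,
        num_channels=64,
        dtype="int16",
    )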
- dtype: dtype, default: None + dtype : dtype, default : None Type of the saved data {} """ @@ -191,7 +191,7 @@ def get_num_samples(self) -> int: """Returns the number of samples in this signal block Returns: - SampleIndex: Number of samples in the signal block + SampleIndex : Number of samples in the signal block """ return self.num_samples diff --git a/src/spikeinterface/core/job_tools.py b/src/spikeinterface/core/job_tools.py index 6499fb145f..3901d0422e 100644 --- a/src/spikeinterface/core/job_tools.py +++ b/src/spikeinterface/core/job_tools.py @@ -17,22 +17,22 @@ from threadpoolctl import threadpool_limits -_shared_job_kwargs_doc = """**job_kwargs: keyword arguments for parallel processing: +_shared_job_kwargs_doc = """**job_kwargs : keyword arguments for parallel processing: * chunk_duration or chunk_size or chunk_memory or total_memory - - chunk_size: int + - chunk_size : int Number of samples per chunk - - chunk_memory: str + - chunk_memory : str Memory usage for each job (e.g. "100M", "1G", "500MiB", "2GiB") - - total_memory: str + - total_memory : str Total memory usage (e.g. "500M", "2G") - chunk_duration : str or float or None Chunk duration in s if float or with units if str (e.g. "1s", "500ms") - * n_jobs: int | float + * n_jobs : int | float Number of jobs to use. With -1 the number of jobs is the same as number of cores. Using a float between 0 and 1 will use that fraction of the total cores. - * progress_bar: bool + * progress_bar : bool If True, a progress bar is printed - * mp_context: "fork" | "spawn" | None, default: None + * mp_context : "fork" | "spawn" | None, default : None Context for multiprocessing. It can be None, "fork" or "spawn". Note that "fork" is only safely available on LINUX systems """ @@ -194,24 +194,24 @@ def ensure_chunk_size( "chunk_size" is the traces.shape[0] for each worker. Flexible chunk_size setter with 3 ways: - * "chunk_size": is the length in sample for each chunk independently of channel count and dtype. - * "chunk_memory": total memory per chunk per worker - * "total_memory": total memory over all workers. + * "chunk_size" : the length in samples for each chunk, independently of channel count and dtype. + * "chunk_memory" : total memory per chunk per worker + * "total_memory" : total memory over all workers. If chunk_size/chunk_memory/total_memory are all None then there is no chunk computing and the full trace is retrieved at once. Parameters ---------- - chunk_size: int or None + chunk_size : int or None Size of one chunk per job - chunk_memory: str or None + chunk_memory : str or None must end with "k", "M", "G", etc for decimal units and "ki", "Mi", "Gi", etc for binary units. (e.g. "1k", "500M", "2G", "1ki", "500Mi", "2Gi") - total_memory: str or None + total_memory : str or None must end with "k", "M", "G", etc for decimal units and "ki", "Mi", "Gi", etc for binary units. (e.g. "1k", "500M", "2G", "1ki", "500Mi", "2Gi") - chunk_duration: None or float or str + chunk_duration : None or float or str Units are seconds if float. If str then the str must contain units (e.g. "1s", "500ms") """ @@ -272,47 +272,47 @@ class ChunkRecordingExecutor: Parameters ---------- - recording: RecordingExtractor + recording : RecordingExtractor The recording to be processed - func: function + func : function Function that runs on each chunk - init_func: function + init_func : function Initializer function to set the global context (accessible by "func") - init_args: tuple + init_args : tuple Arguments for init_func - verbose: bool + verbose : bool If True, output is verbose - job_name: str, default: "" + job_name : str, default : "" Job name - handle_returns: bool, default: False + handle_returns : bool, default : False If True, the function can return values - gather_func: None or callable, default: None + gather_func : None or callable, default : None Optional function that is called in the main thread and retrieves the results of each worker. This function can be used instead of `handle_returns` to implement custom storage on-the-fly. - n_jobs: int, default: 1 + n_jobs : int, default : 1 Number of jobs to be used. Use -1 to use as many jobs as the number of cores - total_memory: str, default: None + total_memory : str, default : None Total memory (RAM) to use (e.g. "1G", "500M") - chunk_memory: str, default: None + chunk_memory : str, default : None Memory per chunk (RAM) to use (e.g. "1G", "500M") - chunk_size: int or None, default: None + chunk_size : int or None, default : None Size of each chunk in number of samples. If "total_memory" or "chunk_memory" are used, it is ignored. chunk_duration : str or float or None Chunk duration in s if float or with units if str (e.g. "1s", "500ms") - mp_context : "fork" | "spawn" | None, default: None + mp_context : "fork" | "spawn" | None, default : None "fork" or "spawn". If None, the context is taken from recording.get_preferred_mp_context(). "fork" is only safely available on LINUX systems. - max_threads_per_process: int or None, default: None + max_threads_per_process : int or None, default : None Limit the number of threads per process using the threadpoolctl module. This is used only when n_jobs>1 If None, no limits.
- progress_bar: bool, default: False + progress_bar : bool, default : False If True, a progress bar is printed to monitor the progress of the process Returns ------- - res: list + res : list If "handle_returns" is True, the results for each chunk process """ diff --git a/src/spikeinterface/core/npyfoldersnippets.py b/src/spikeinterface/core/npyfoldersnippets.py index 5c1078996c..514a56fdf5 100644 --- a/src/spikeinterface/core/npyfoldersnippets.py +++ b/src/spikeinterface/core/npyfoldersnippets.py @@ -17,12 +17,12 @@ class NpyFolderSnippets(NpySnippetsExtractor): Parameters ---------- - folder_path: str or Path + folder_path : str or Path The path to the folder Returns ------- - snippets: NpyFolderSnippets + snippets : NpyFolderSnippets The snippets """ diff --git a/src/spikeinterface/core/numpyextractors.py b/src/spikeinterface/core/numpyextractors.py index 00b4d9efff..06e6cf75c8 100644 --- a/src/spikeinterface/core/numpyextractors.py +++ b/src/spikeinterface/core/numpyextractors.py @@ -27,13 +27,13 @@ class NumpyRecording(BaseRecording): Parameters ---------- - traces_list: list of array or array (if mono segment) + traces_list : list of array or array (if mono segment) The traces to instantiate a mono or multisegment Recording - sampling_frequency: float + sampling_frequency : float The sampling frequency in Hz - t_starts: None or list of float + t_starts : None or list of float Times in seconds of the first sample for each segment - channel_ids: list + channel_ids : list An optional list of channel_ids. If None, linear channels are assumed """ @@ -127,19 +127,19 @@ class SharedMemoryRecording(BaseRecording): Parameters ---------- - shm_names: list + shm_names : list List of sharedmem names for each segment - shape_list: list + shape_list : list List of shape of sharedmem buffer for each segment The first dimension is the number of samples, the second is the number of channels. Note that the number of channels must be the same for all segments - sampling_frequency: float + sampling_frequency : float The sampling frequency in Hz - t_starts: None or list of float + t_starts : None or list of float Times in seconds of the first sample for each segment - channel_ids: list + channel_ids : list An optional list of channel_ids. If None, linear channels are assumed - main_shm_owner: bool, default: True + main_shm_owner : bool, default : True If True, the main instance will unlink the sharedmem buffer when deleted """ @@ -246,11 +246,11 @@ class NumpySorting(BaseSorting): Parameters ---------- - spikes: numpy.array + spikes : numpy.array A numpy vector, the one given by Sorting.to_spike_vector(). - sampling_frequency: float + sampling_frequency : float The sampling frequency in Hz - channel_ids: list + channel_ids : list A list of unit_ids. 
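A minimal sketch of the in-memory extractors documented in this hunk (from_times_labels is documented just below); the array values are arbitrary demo data:

    import numpy as np
    from spikeinterface.core import NumpyRecording, NumpySorting

    # a mono-segment recording: 20000 samples x 4 channels at 20 kHz
    traces = np.random.randn(20_000, 4).astype("float32")
    rec = NumpyRecording(traces_list=[traces], sampling_frequency=20_000.0)

    # a sorting built from spike frames and their unit labels
    times = np.array([100, 150, 300])
    labels = np.array([0, 1, 0])
    sorting = NumpySorting.from_times_labels(times, labels, sampling_frequency=20_000.0)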
""" @@ -302,11 +302,11 @@ def from_times_labels(times_list, labels_list, sampling_frequency, unit_ids=None Parameters ---------- - times_list: list of array (or array) + times_list : list of array (or array) An array of spike times (in frames) - labels_list: list of array (or array) + labels_list : list of array (or array) An array of spike labels corresponding to the given times - unit_ids: list or None, default: None + unit_ids : list or None, default : None The explicit list of unit_ids that should be extracted from labels_list If None, then it will be np.unique(labels_list) """ @@ -352,7 +352,7 @@ def from_unit_dict(units_dict_list, sampling_frequency) -> "NumpySorting": Parameters ---------- - dict_list: list of dict + dict_list : list of dict """ if isinstance(units_dict_list, dict): units_dict_list = [units_dict_list] @@ -445,7 +445,7 @@ def from_peaks(peaks, sampling_frequency, unit_ids) -> "NumpySorting": Peaks array as returned by the 'detect_peaks()' function sampling_frequency : float the sampling frequency in Hz - unit_ids: np.array + unit_ids : np.array The unit_ids vector which is generally the channel_ids but can be different. Returns @@ -593,14 +593,14 @@ class NumpySnippets(BaseSnippets): Parameters ---------- - snippets_list: list of array or array (if mono segment) + snippets_list : list of array or array (if mono segment) The snippets to instantiate a mono or multisegment basesnippet - spikesframes_list: list of array or array (if mono segment) + spikesframes_list : list of array or array (if mono segment) Frame of each snippet - sampling_frequency: float + sampling_frequency : float The sampling frequency in Hz - channel_ids: list + channel_ids : list An optional list of channel_ids. If None, linear channels are assumed """ @@ -666,14 +666,14 @@ def get_snippets( Parameters ---------- - indices: list[int] + indices : list[int] Indices of the snippets to return - channel_indices: Union[list, None], default: None + channel_indices : Union[list, None], default : None Indices of channels to return, or all channels if None Returns ------- - snippets: np.ndarray + snippets : np.ndarray Array of snippets, num_snippets x num_samples x num_channels """ if indices is None: @@ -689,13 +689,13 @@ def frames_to_indices(self, start_frame: Union[int, None] = None, end_frame: Uni Parameters ---------- - start_frame: Union[int, None], default: None + start_frame : Union[int, None], default : None start sample index, or zero if None - end_frame: Union[int, None], default: None + end_frame : Union[int, None], default : None end_sample, or number of samples if None Returns ------- - snippets: slice + snippets : slice slice of selected snippets """ # must be implemented in subclass @@ -713,7 +713,7 @@ def get_frames(self, indices=None): """Returns the frames of the snippets in this segment Returns: - SampleIndex: Number of samples in the segment + SampleIndex : Number of samples in the segment """ if indices is None: return self._spikestimes diff --git a/src/spikeinterface/core/recording_tools.py b/src/spikeinterface/core/recording_tools.py index 50bc8ab032..3f7fda694f 100644 --- a/src/spikeinterface/core/recording_tools.py +++ b/src/spikeinterface/core/recording_tools.py @@ -27,16 +27,16 @@ def read_binary_recording(file, num_channels, dtype, time_axis=0, offset=0): Parameters ---------- - file: str + file : str File name - num_channels: int + num_channels : int Number of channels - dtype: dtype + dtype : dtype dtype of the file - time_axis: 0 or 1, default: 0 + time_axis : 0 or 1, 
default : 0 If 0 then traces are transposed to ensure (nb_sample, nb_channel) in the file. If 1, the traces shape (nb_channel, nb_sample) is kept in the file. - offset: int, default: 0 + offset : int, default : 0 Number of offset bytes """ @@ -85,21 +85,21 @@ def write_binary_recording( Parameters ---------- - recording: RecordingExtractor + recording : RecordingExtractor The recording extractor object to be saved in .dat format - file_path: str or list[str] + file_path : str or list[str] The path to the file. - dtype: dtype or None, default: None + dtype : dtype or None, default : None Type of the saved data - add_file_extension, bool, default: True + add_file_extension : bool, default : True If True, and the file path does not end in "raw", "bin", or "dat" then "raw" is added as an extension. - byte_offset: int, default: 0 + byte_offset : int, default : 0 Offset in bytes for the binary file (e.g. to write a header). This is useful in case you want to append data to an existing file where you wrote a header or other data before. - auto_cast_uint: bool, default: True + auto_cast_uint : bool, default : True If True, unsigned integers are automatically cast to int if the specified dtype is signed .. deprecated:: 0.103, use the `unsigned_to_signed` function instead. - verbose: bool + verbose : bool This is the verbosity of the ChunkRecordingExecutor {} """ @@ -292,20 +292,20 @@ def write_memory_recording(recording, dtype=None, verbose=False, auto_cast_uint= Parameters ---------- - recording: RecordingExtractor + recording : RecordingExtractor The recording extractor object to be saved in .dat format - dtype: dtype, default: None + dtype : dtype, default : None Type of the saved data - verbose: bool, default: False + verbose : bool, default : False If True, output is verbose (when chunks are used) - auto_cast_uint: bool, default: True + auto_cast_uint : bool, default : True If True, unsigned integers are automatically cast to int if the specified dtype is signed - buffer_type: "auto" | "numpy" | "sharedmem" + buffer_type : "auto" | "numpy" | "sharedmem" {} Returns ------- - arrays: one array per segment + arrays : one array per segment """ job_kwargs = fix_job_kwargs(job_kwargs) @@ -382,34 +382,34 @@ def write_to_h5_dataset_format( Parameters ---------- - recording: RecordingExtractor + recording : RecordingExtractor The recording extractor object to be saved in .dat format - dataset_path: str + dataset_path : str Path to dataset in the h5 file (e.g. "/dataset") - segment_index: int + segment_index : int Index of the segment - save_path: str, default: None + save_path : str, default : None The path to the file. - file_handle: file handle, default: None + file_handle : file handle, default : None The file handle to dump data. This can be used to append data to a header. In case file_handle is given, the file is NOT closed after writing the binary data. - time_axis: 0 or 1, default: 0 + time_axis : 0 or 1, default : 0 If 0 then traces are transposed to ensure (nb_sample, nb_channel) in the file. If 1, the traces shape (nb_channel, nb_sample) is kept in the file. - single_axis: bool, default: False + single_axis : bool, default : False If True, a single-channel recording is saved as a one dimensional array - dtype: dtype, default: None + dtype : dtype, default : None Type of the saved data - chunk_size: None or int, default: None + chunk_size : None or int, default : None Number of chunks to save the file in. This avoids too much memory consumption for big files.
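A short usage sketch of write_binary_recording() as documented above, passing the shared job_kwargs from job_tools; `rec` and the output path are hypothetical, assuming the function is importable from spikeinterface.core:

    from spikeinterface.core import write_binary_recording

    write_binary_recording(rec, "traces.raw", dtype="int16",
                           n_jobs=4, chunk_duration="1s", progress_bar=True)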
If None and "chunk_memory" is given, the file is saved in chunks of "chunk_memory" MB - chunk_memory: None or str, default: "500M" + chunk_memory : None or str, default : "500M" Chunk size in bytes; must end with "k", "M" or "G" - verbose: bool, default: False + verbose : bool, default : False If True, output is verbose (when chunks are used) - auto_cast_uint: bool, default: True + auto_cast_uint : bool, default : True If True, unsigned integers are automatically cast to int if the specified dtype is signed - return_scaled : bool, default: False + return_scaled : bool, default : False If True and the recording has scaling (gain_to_uV and offset_to_uV properties), traces are dumped to uV """ @@ -525,24 +525,24 @@ def get_random_data_chunks( Parameters ---------- - recording: BaseRecording + recording : BaseRecording The recording to get random chunks from - return_scaled: bool, default: False + return_scaled : bool, default : False If True, returned chunks are scaled to uV - num_chunks_per_segment: int, default: 20 + num_chunks_per_segment : int, default : 20 Number of chunks per segment - chunk_size: int, default: 10000 + chunk_size : int, default : 10000 Size of a chunk in number of frames - concatenated: bool, default: True + concatenated : bool, default : True If True, chunks are concatenated along the time axis - seed: int, default: 0 + seed : int, default : 0 Random seed - margin_frames: int, default: 0 + margin_frames : int, default : 0 Margin in number of frames to avoid edge effects Returns ------- - chunk_list: np.array + chunk_list : np.array Array of concatenated chunks per segment """ # TODO: if segments have different lengths, make the sampling dependent on the length of each segment @@ -602,18 +602,18 @@ def get_closest_channels(recording, channel_ids=None, num_channels=None): Parameters ---------- - recording: RecordingExtractor + recording : RecordingExtractor The recording extractor to get closest channels - channel_ids: list + channel_ids : list List of channel ids for which to compute the nearest neighborhood - num_channels: int, default: None + num_channels : int, default : None Maximum number of neighborhood channels to return Returns ------- closest_channels_inds : array (2d) Closest channel indices in ascending order for each channel id given in input - dists: array (2d) + dists : array (2d) Distances in ascending order for each channel id given in input """ if channel_ids is None: @@ -651,20 +651,20 @@ def get_noise_levels( Parameters ---------- - recording: BaseRecording + recording : BaseRecording The recording extractor to get noise levels - return_scaled: bool + return_scaled : bool If True, returned noise levels are scaled to uV - method: "mad" | "std", default: "mad" + method : "mad" | "std", default : "mad" The method to use to estimate noise levels - force_recompute: bool + force_recompute : bool If True, noise levels are recomputed even if they are already stored in the recording extractor - random_chunk_kwargs: dict + random_chunk_kwargs : dict Kwargs for get_random_data_chunks Returns ------- - noise_levels: array + noise_levels : array Noise levels for each channel """ @@ -820,11 +820,11 @@ def order_channels_by_depth(recording, channel_ids=None, dimensions=("x", "y"), The input recording channel_ids : list/array or None If given, a subset of channels to order locations for - dimensions : str, tuple, or list, default: ('x', 'y') + dimensions : str, tuple, or list, default : ('x', 'y') If str, it needs to be 'x', 'y', 'z'.
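A one-line sketch of the noise estimation documented above, assuming `rec` is an existing recording and that the function is importable from spikeinterface.core:

    from spikeinterface.core import get_noise_levels

    # per-channel noise, robust MAD estimate, scaled to uV
    noise_levels = get_noise_levels(rec, return_scaled=True, method="mad")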
If tuple or list, it sorts the locations in two dimensions using lexsort. This approach is recommended since there is less ambiguity - flip: bool, default: False + flip : bool, default : False If flip is False then the order is bottom first (starting from the tip of the probe). If flip is True then the order is upper first. diff --git a/src/spikeinterface/core/sortinganalyzer.py b/src/spikeinterface/core/sortinganalyzer.py index e6c191617a..838905f187 100644 --- a/src/spikeinterface/core/sortinganalyzer.py +++ b/src/spikeinterface/core/sortinganalyzer.py @@ -55,30 +55,30 @@ def create_sorting_analyzer( Parameters ---------- - sorting: Sorting + sorting : Sorting The sorting object - recording: Recording + recording : Recording The recording object - folder: str or Path or None, default: None + folder : str or Path or None, default : None The folder where waveforms are cached - format: "memory | "binary_folder" | "zarr", default: "memory" + format : "memory" | "binary_folder" | "zarr", default : "memory" The mode to store waveforms. If "folder", waveforms are stored on disk in the specified folder. The "folder" argument must be specified in case of mode "folder". If "memory" is used, the waveforms are stored in RAM. Use this option carefully! - sparse: bool, default: True + sparse : bool, default : True If True, a sparsity mask is computed with the `estimate_sparsity()` function, using a few spikes to get an estimate of dense templates and create a ChannelSparsity object. Then, the sparsity will be propagated to all ResultExtensions that handle sparsity (like waveforms, pca, ...) You can control `estimate_sparsity()`: all extra arguments are propagated to it (including job_kwargs) - sparsity: ChannelSparsity or None, default: None + sparsity : ChannelSparsity or None, default : None The sparsity used to compute waveforms. If this is given, `sparse` is ignored. - return_scaled: bool, default: True - All extensions that play with traces will use this global return_scaled: "waveforms", "noise_levels", "templates". + return_scaled : bool, default : True + All extensions that use traces will use this global return_scaled: "waveforms", "noise_levels", "templates". This prevents return_scaled from differing between extensions, which could, for instance, give a wrong snr. Returns ------- - sorting_analyzer: SortingAnalyzer + sorting_analyzer : SortingAnalyzer The SortingAnalyzer object Examples @@ -150,14 +150,14 @@ def load_sorting_analyzer(folder, load_extensions=True, format="auto"): ---------- folder : str or Path The folder / zarr folder where the waveform extractor is stored - load_extensions : bool, default: True + load_extensions : bool, default : True Load all extensions or not. - format: "auto" | "binary_folder" | "zarr" + format : "auto" | "binary_folder" | "zarr" The format of the folder.
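A minimal sketch of the create/load workflow documented in this hunk, assuming `recording` and `sorting` are existing objects and the folder name is hypothetical:

    from spikeinterface import create_sorting_analyzer, load_sorting_analyzer

    analyzer = create_sorting_analyzer(sorting, recording,
                                       format="binary_folder",
                                       folder="my_analyzer",
                                       sparse=True)
    # later, reload from disk together with its saved extensions
    analyzer = load_sorting_analyzer("my_analyzer")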
Returns ------- - sorting_analyzer: SortingAnalyzer + sorting_analyzer : SortingAnalyzer The loaded SortingAnalyzer """ @@ -685,7 +685,7 @@ def save_as(self, format="memory", folder=None) -> "SortingAnalyzer": ---------- folder : str or Path The output waveform folder - format : "binary_folder" | "zarr", default: "binary_folder" + format : "binary_folder" | "zarr", default : "binary_folder" The backend to use for saving the waveforms """ return self._save_or_select(format=format, folder=folder, unit_ids=None) @@ -842,16 +842,16 @@ def compute(self, input, save=True, extension_params=None, verbose=False, **kwar Parameters ---------- - input: str or dict or list + input : str or dict or list The extensions to compute, which can be passed as: * a string: compute one extension. Additional parameters can be passed as keyword arguments. * a dict: compute several extensions. The keys are the extension names and the values are dictionaries with the extension parameters. * a list: compute several extensions. The list contains the extension names. Additional parameters can be passed with the extension_params argument. - save: bool, default: True + save : bool, default : True If True, the extension is saved to disk (only if sorting analyzer format is not "memory") - extension_params: dict or None, default: None + extension_params : dict or None, default : None If input is a list, this parameter can be used to specify parameters for each extension. The extension_params keys must be included in the input list. **kwargs: Returns ------- extension : SortingAnalyzerExtension | None The extension instance if input is a string, None otherwise. Examples @@ -911,10 +911,10 @@ def compute_one_extension(self, extension_name, save=True, verbose=False, **kwar Parameters ---------- - extension_name: str + extension_name : str The name of the extension. For instance "waveforms", "templates", ... - save: bool, default: True + save : bool, default : True If the extension can be saved, then it is saved. If not, then the extension will only live in memory as long as the object exists. save=False is convenient to try some parameters without changing an already saved extension. **kwargs: Returns ------- - result_extension: AnalyzerExtension + result_extension : AnalyzerExtension Returns the extension instance Examples @@ -980,9 +980,9 @@ def compute_several_extensions(self, extensions, save=True, verbose=False, **job Parameters ---------- - extensions: dict + extensions : dict Keys are extension_names and values are params. - save: bool, default: True + save : bool, default : True If the extension can be saved, then it is saved. If not, then the extension will only live in memory as long as the object exists. save=False is convenient to try some parameters without changing an already saved extension. @@ -1131,7 +1131,7 @@ def load_extension(self, extension_name: str): Parameters ---------- - extension_name: str + extension_name : str The extension name.
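The three input forms accepted by compute(), per the docstring above, as a short sketch on an existing `analyzer` (the extension names are examples; "waveforms" and "templates" depend on "random_spikes" being computed first):

    analyzer.compute("random_spikes")                     # a string: one extension
    analyzer.compute({"waveforms": {}, "templates": {}})  # a dict: several extensions with params
    analyzer.compute(["noise_levels"], extension_params={})  # a list, optionally with extension_params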
Returns @@ -1206,12 +1206,12 @@ def get_default_extension_params(self, extension_name: str): Parameters ---------- - extension_name: str + extension_name : str The extension name Returns ------- - default_params: dict + default_params : dict The default parameters for the extension """ return get_default_analyzer_extension_params(extension_name) @@ -1224,12 +1224,12 @@ def _sort_extensions_by_dependency(extensions): Parameters ---------- - extensions: dict + extensions : dict A dict of extensions. Returns ------- - sorted_extensions: dict + sorted_extensions : dict A dict of extensions, with the parents on the left of their children. """ @@ -1341,9 +1341,9 @@ def get_extension_class(extension_name: str, auto_import=True): Parameters ---------- - extension_name: str + extension_name : str The extension name. - auto_import: bool, default: True + auto_import : bool, default : True Auto import the module if the extension class is not registered yet. Returns @@ -1384,12 +1384,12 @@ def get_default_analyzer_extension_params(extension_name: str): Parameters ---------- - extension_name: str + extension_name : str The extension name Returns ------- - default_params: dict + default_params : dict The default parameters for the extension """ import inspect diff --git a/src/spikeinterface/core/sortingfolder.py b/src/spikeinterface/core/sortingfolder.py index 567ad915c9..ef6f524b6c 100644 --- a/src/spikeinterface/core/sortingfolder.py +++ b/src/spikeinterface/core/sortingfolder.py @@ -83,11 +83,11 @@ class NpzFolderSorting(NpzSortingExtractor): Parameters ---------- - folder_path: str or Path + folder_path : str or Path Returns ------- - sorting: NpzFolderSorting + sorting : NpzFolderSorting The sorting """ diff --git a/src/spikeinterface/core/sparsity.py b/src/spikeinterface/core/sparsity.py index cefd7bd950..48cba4f0be 100644 --- a/src/spikeinterface/core/sparsity.py +++ b/src/spikeinterface/core/sparsity.py @@ -11,29 +11,29 @@ _sparsity_doc = """ - method: str - * "best_channels": N best channels with the largest amplitude. Use the "num_channels" argument to specify the + method : str + * "best_channels" : N best channels with the largest amplitude. Use the "num_channels" argument to specify the number of channels. - * "radius": radius around the best channel. Use the "radius_um" argument to specify the radius in um + * "radius" : radius around the best channel. Use the "radius_um" argument to specify the radius in um - * "snr": threshold based on template signal-to-noise ratio. Use the "threshold" argument + * "snr" : threshold based on template signal-to-noise ratio. Use the "threshold" argument to specify the SNR threshold (in units of noise levels) - * "ptp": threshold based on the peak-to-peak values on every channels. Use the "threshold" argument + * "ptp" : threshold based on the peak-to-peak values on every channel. Use the "threshold" argument to specify the ptp threshold (in units of noise levels) - * "energy": threshold based on the expected energy that should be present on the channels, + * "energy" : threshold based on the expected energy that should be present on the channels, given their noise levels. Use the "threshold" argument to specify the SNR threshold (in units of noise levels) - * "by_property": sparsity is given by a property of the recording and sorting(e.g. "group"). + * "by_property" : sparsity is given by a property of the recording and sorting (e.g. "group"). Use the "by_property" argument to specify the property name.
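A sketch of how these methods are selected in practice (reusing the `recording`, `sorting` and `analyzer` from the earlier sketch; the remaining arguments are documented just below):

    from spikeinterface.core import compute_sparsity, estimate_sparsity

    # cheap estimate from a few spikes per unit, before any analyzer exists
    sparsity = estimate_sparsity(recording=recording, sorting=sorting, method="radius", radius_um=100.0)

    # or derive it from the dense templates of an existing SortingAnalyzer
    sparsity = compute_sparsity(analyzer, method="best_channels", num_channels=5, peak_sign="neg")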
- peak_sign: str + peak_sign : str Sign of the template to compute best channels ("neg", "pos", "both") - num_channels: int + num_channels : int Number of channels for "best_channels" method - radius_um: float + radius_um : float Radius in um for "radius" method - threshold: float + threshold : float Threshold in SNR "threshold" method - by_property: object + by_property : object Property name for "by_property" method """ @@ -61,11 +61,11 @@ class ChannelSparsity: Parameters ---------- - mask: np.array of bool + mask : np.array of bool The sparsity mask (num_units, num_channels) - unit_ids: list or array + unit_ids : list or array Unit ids vector or list - channel_ids: list or array + channel_ids : list or array Channel ids vector or list Examples @@ -469,7 +469,7 @@ def compute_sparsity( Parameters ---------- - templates_or_sorting_analyzer: Templates | SortingAnalyzer + templates_or_sorting_analyzer : Templates | SortingAnalyzer A Templates or a SortingAnalyzer object. Some methods accept both objects ("best_channels", "radius", ) Other methods require only SortingAnalyzer because internally the recording is needed. @@ -478,7 +478,7 @@ def compute_sparsity( Returns ------- - sparsity: ChannelSparsity + sparsity : ChannelSparsity The estimated sparsity """ @@ -563,31 +563,31 @@ def estimate_sparsity( Parameters ---------- - recording: BaseRecording + recording : BaseRecording The recording - sorting: BaseSorting + sorting : BaseSorting The sorting - num_spikes_for_sparsity: int, default: 100 + num_spikes_for_sparsity : int, default : 100 How many spikes per units to compute the sparsity - ms_before: float, default: 1.0 + ms_before : float, default : 1.0 Cut out in ms before spike time - ms_after: float, default: 2.5 + ms_after : float, default : 2.5 Cut out in ms after spike time - method: "radius" | "best_channels", default: "radius" + method : "radius" | "best_channels", default : "radius" Sparsity method propagated to the `compute_sparsity()` function. Only "radius" or "best_channels" are implemented - peak_sign: "neg" | "pos" | "both", default: "neg" + peak_sign : "neg" | "pos" | "both", default : "neg" Sign of the template to compute best channels - radius_um: float, default: 100.0 + radius_um : float, default : 100.0 Used for "radius" method - num_channels: int, default: 5 + num_channels : int, default : 5 Used for "best_channels" method {} Returns ------- - sparsity: ChannelSparsity + sparsity : ChannelSparsity The estimated sparsity """ # Can't be done at module because this is a cyclic import, too bad diff --git a/src/spikeinterface/core/template_tools.py b/src/spikeinterface/core/template_tools.py index 69706b3bd9..50fba73aef 100644 --- a/src/spikeinterface/core/template_tools.py +++ b/src/spikeinterface/core/template_tools.py @@ -13,14 +13,14 @@ def get_dense_templates_array(one_object: Templates | SortingAnalyzer, return_sc Parameters ---------- - one_object: Templates | SortingAnalyzer + one_object : Templates | SortingAnalyzer The Templates or SortingAnalyzer objects. If SortingAnalyzer, it needs the "templates" extension. - return_scaled: bool, default: True + return_scaled : bool, default : True If True, templates are scaled. 
Returns ------- - dense_templates: np.ndarray + dense_templates : np.ndarray The dense templates (num_units, num_samples, num_channels) """ if isinstance(one_object, Templates): @@ -65,23 +65,23 @@ def get_template_amplitudes( Parameters ---------- - templates_or_sorting_analyzer: Templates | SortingAnalyzer + templates_or_sorting_analyzer : Templates | SortingAnalyzer A Templates or a SortingAnalyzer object - peak_sign: "neg" | "pos" | "both" + peak_sign : "neg" | "pos" | "both" Sign of the template to find extremum channels - mode: "extremum" | "at_index" | "peak_to_peak", default: "at_index" + mode : "extremum" | "at_index" | "peak_to_peak", default : "at_index" Where the amplitude is computed - * "extremum": take the peak value (max or min depending on `peak_sign`) - * "at_index": take value at `nbefore` index - * "peak_to_peak": take the peak-to-peak amplitude - return_scaled: bool, default True + * "extremum" : take the peak value (max or min depending on `peak_sign`) + * "at_index" : take value at `nbefore` index + * "peak_to_peak" : take the peak-to-peak amplitude + return_scaled : bool, default : True The amplitude is scaled or not. - abs_value: bool = True + abs_value : bool = True Whether the extremum amplitude should be returned as an absolute value or not Returns ------- - peak_values: dict + peak_values : dict Dictionary with unit ids as keys and template amplitudes as values """ assert peak_sign in ("both", "neg", "pos"), "'peak_sign' must be 'both', 'neg', or 'pos'" @@ -131,22 +131,22 @@ def get_template_extremum_channel( Parameters ---------- - templates_or_sorting_analyzer: Templates | SortingAnalyzer + templates_or_sorting_analyzer : Templates | SortingAnalyzer A Templates or a SortingAnalyzer object - peak_sign: "neg" | "pos" | "both" + peak_sign : "neg" | "pos" | "both" Sign of the template to find extremum channels - mode: "extremum" | "at_index" | "peak_to_peak", default: "at_index" + mode : "extremum" | "at_index" | "peak_to_peak", default : "at_index" Where the amplitude is computed - * "extremum": take the peak value (max or min depending on `peak_sign`) - * "at_index": take value at `nbefore` index - * "peak_to_peak": take the peak-to-peak amplitude - outputs: "id" | "index", default: "id" - * "id": channel id - * "index": channel index + * "extremum" : take the peak value (max or min depending on `peak_sign`) + * "at_index" : take value at `nbefore` index + * "peak_to_peak" : take the peak-to-peak amplitude + outputs : "id" | "index", default : "id" + * "id" : channel id + * "index" : channel index Returns ------- - extremum_channels: dict + extremum_channels : dict Dictionary with unit ids as keys and extremum channels (id or index based on "outputs") as values """ @@ -188,14 +188,14 @@ def get_template_extremum_channel_peak_shift(templates_or_sorting_analyzer, peak Parameters ---------- - templates_or_sorting_analyzer: Templates | SortingAnalyzer + templates_or_sorting_analyzer : Templates | SortingAnalyzer A Templates or a SortingAnalyzer object - peak_sign: "neg" | "pos" | "both" + peak_sign : "neg" | "pos" | "both" Sign of the template to find extremum channels Returns ------- - shifts: dict + shifts : dict Dictionary with unit ids as keys and shifts as values """ unit_ids = templates_or_sorting_analyzer.unit_ids @@ -244,22 +244,22 @@ def get_template_extremum_amplitude( Parameters ---------- - templates_or_sorting_analyzer: Templates | SortingAnalyzer + templates_or_sorting_analyzer : Templates | SortingAnalyzer A Templates or a SortingAnalyzer object -
peak_sign: "neg" | "pos" | "both" + peak_sign : "neg" | "pos" | "both" Sign of the template to find extremum channels - mode: "extremum" | "at_index" | "peak_to_peak", default: "at_index" + mode : "extremum" | "at_index" | "peak_to_peak", default : "at_index" Where the amplitude is computed * "extremum": take the peak value (max or min depending on `peak_sign`) * "at_index": take value at `nbefore` index * "peak_to_peak": take the peak-to-peak amplitude - abs_value: bool = True + abs_value : bool = True Whether the extremum amplitude should be returned as an absolute value or not Returns ------- - amplitudes: dict + amplitudes : dict Dictionary with unit ids as keys and amplitudes as values """ assert peak_sign in ("both", "neg", "pos"), "'peak_sign' must be 'neg' or 'pos' or 'both'" diff --git a/src/spikeinterface/core/zarrextractors.py b/src/spikeinterface/core/zarrextractors.py index eed0a581bb..7693935ef7 100644 --- a/src/spikeinterface/core/zarrextractors.py +++ b/src/spikeinterface/core/zarrextractors.py @@ -20,14 +20,14 @@ class ZarrRecordingExtractor(BaseRecording): Parameters ---------- - folder_path: str or Path + folder_path : str or Path Path to the zarr root folder - storage_options: dict or None + storage_options : dict or None Storage options for zarr `store`. E.g., if "s3://" or "gcs://" they can provide authentication methods, etc. Returns ------- - recording: ZarrRecordingExtractor + recording : ZarrRecordingExtractor The recording Extractor """ @@ -134,7 +134,7 @@ def get_num_samples(self) -> int: """Returns the number of samples in this signal block Returns: - SampleIndex: Number of samples in the signal block + SampleIndex : Number of samples in the signal block """ return self._timeseries.shape[0] @@ -156,15 +156,15 @@ class ZarrSortingExtractor(BaseSorting): Parameters ---------- - folder_path: str or Path + folder_path : str or Path Path to the zarr root file - storage_options: dict or None + storage_options : dict or None Storage options for zarr `store`. E.g., if "s3://" or "gcs://" they can provide authentication methods, etc. - zarr_group: str or None, default: None + zarr_group : str or None, default : None Optional zarr group path to load the sorting from. This can be used when the sorting is not stored at the root, but in sub group. Returns ------- - sorting: ZarrSortingExtractor + sorting : ZarrSortingExtractor The sorting Extractor """ @@ -245,14 +245,14 @@ def read_zarr( Parameters ---------- - folder_path: str or Path + folder_path : str or Path Path to the zarr root file - storage_options: dict or None + storage_options : dict or None Storage options for zarr `store`. E.g., if "s3://" or "gcs://" they can provide authentication methods, etc. Returns ------- - extractor: ZarrExtractor + extractor : ZarrExtractor The loaded extractor """ # TODO @alessio : we should have something more explicit in our zarr format to tell which object it is. @@ -294,7 +294,7 @@ def get_default_zarr_compressor(clevel: int = 5): Parameters ---------- - clevel : int, default: 5 + clevel : int, default : 5 Compression level (higher -> more compressed). Minimum 1, maximum 9. 
By default 5 @@ -329,11 +329,11 @@ def add_sorting_to_zarr_group(sorting: BaseSorting, zarr_group: zarr.hierarchy.G Parameters ---------- - sorting: BaseSorting + sorting : BaseSorting The sorting extractor object to be added to the zarr group - zarr_group: zarr.hierarchy.Group + zarr_group : zarr.hierarchy.Group The zarr group - kwargs: dict + kwargs : dict Other arguments passed to the zarr compressor """ from numcodecs import Delta @@ -453,23 +453,23 @@ def add_traces_to_zarr( Parameters ---------- - recording: RecordingExtractor + recording : RecordingExtractor The recording extractor object to be saved in .dat format - zarr_group: zarr.Group + zarr_group : zarr.Group The zarr group to add traces to - dataset_paths: list + dataset_paths : list List of paths to traces datasets in the zarr group - channel_chunk_size: int or None, default: None (chunking in time only) + channel_chunk_size : int or None, default : None (chunking in time only) Channels per chunk - dtype: dtype, default: None + dtype : dtype, default : None Type of the saved data - compressor: zarr compressor or None, default: None + compressor : zarr compressor or None, default : None Zarr compressor - filters: list, default: None + filters : list, default : None List of zarr filters - verbose: bool, default: False + verbose : bool, default : False If True, output is verbose (when chunks are used) - auto_cast_uint: bool, default: True + auto_cast_uint : bool, default : True If True, unsigned integers are automatically cast to int if the specified dtype is signed {} """ diff --git a/src/spikeinterface/curation/auto_merge.py b/src/spikeinterface/curation/auto_merge.py index 05cbbf5f34..d596fd7608 100644 --- a/src/spikeinterface/curation/auto_merge.py +++ b/src/spikeinterface/curation/auto_merge.py @@ -59,53 +59,53 @@ def get_potential_auto_merge( Parameters ---------- - sorting_analyzer: SortingAnalyzer + sorting_analyzer : SortingAnalyzer The SortingAnalyzer - minimum_spikes: int, default: 1000 + minimum_spikes : int, default : 1000 Minimum number of spikes for each unit to consider a potential merge. Enough spikes are needed to estimate the correlogram - maximum_distance_um: float, default: 150 + maximum_distance_um : float, default : 150 Maximum distance between units for considering a merge - peak_sign: "neg" | "pos" | "both", default: "neg" + peak_sign : "neg" | "pos" | "both", default : "neg" Peak sign used to estimate the maximum channel of a template - bin_ms: float, default: 0.25 + bin_ms : float, default : 0.25 Bin size in ms used for computing the correlogram - window_ms: float, default: 100 + window_ms : float, default : 100 Window size in ms used for computing the correlogram - corr_diff_thresh: float, default: 0.16 + corr_diff_thresh : float, default : 0.16 The threshold on the "correlogram distance metric" for considering a merge. It needs to be between 0 and 1 - template_diff_thresh: float, default: 0.25 + template_diff_thresh : float, default : 0.25 The threshold on the "template distance metric" for considering a merge. It needs to be between 0 and 1 - template_metric: 'l1' + template_metric : 'l1' The metric to be used when comparing templates. 
Default is l1 norm - censored_period_ms: float, default: 0.3 + censored_period_ms : float, default : 0.3 Used to compute the refractory period violations aka "contamination" - refractory_period_ms: float, default: 1 + refractory_period_ms : float, default : 1 Used to compute the refractory period violations aka "contamination" - sigma_smooth_ms: float, default: 0.6 + sigma_smooth_ms : float, default : 0.6 Parameters to smooth the correlogram estimation - contamination_threshold: float, default: 0.2 + contamination_threshold : float, default : 0.2 Threshold for not taking into account a unit when it is too contaminated - adaptative_window_threshold:: float, default: 0.5 + adaptative_window_threshold : float, default : 0.5 Parameter to detect the window size in correlogram estimation - censor_correlograms_ms: float, default: 0.15 + censor_correlograms_ms : float, default : 0.15 The period to censor on the auto and cross-correlograms - num_channels: int, default: 5 + num_channels : int, default : 5 Number of channels to use for template similarity computation - num_shift: int, default: 5 + num_shift : int, default : 5 Number of shifts in samples to be explored for template similarity computation - firing_contamination_balance: float, default: 1.5 + firing_contamination_balance : float, default : 1.5 Parameter to control the balance between firing rate and contamination in computing unit "quality score" - extra_outputs: bool, default: False + extra_outputs : bool, default : False If True, an additional dictionary (`outs`) with processed data is returned - steps: None or list of str, default: None + steps : None or list of str, default : None which steps to run (gives flexibility to running just some steps) If None all steps are done. - Pontential steps: "min_spikes", "remove_contaminated", "unit_positions", "correlogram", "template_similarity", + Potential steps: "min_spikes", "remove_contaminated", "unit_positions", "correlogram", "template_similarity", "check_increase_score". Please check steps explanations above! - template_metric: 'l1', 'l2' or 'cosine' + template_metric : 'l1', 'l2' or 'cosine' The metric to consider when measuring the distances between templates. Default is l1 Returns @@ -258,18 +258,18 @@ def compute_correlogram_diff( Parameters ---------- - sorting: BaseSorting + sorting : BaseSorting The sorting object - correlograms_smoothed: array 3d + correlograms_smoothed : array 3d The 3d array containing all cross and auto correlograms (smoothed by a convolution with a gaussian curve) - bins: array + bins : array Bins of the correlograms win_sized: TODO - adaptative_window_threshold: float + adaptative_window_threshold : float TODO - pair_mask: None or boolean array + pair_mask : None or boolean array A bool matrix of size (num_units, num_units) to select which pair to compute. @@ -365,9 +365,9 @@ def get_unit_adaptive_window(auto_corr: np.ndarray, threshold: float): Parameters ---------- - auto_corr: np.ndarray + auto_corr : np.ndarray Correlogram used for adaptive window. - threshold: float + threshold : float Minimum threshold of correlogram (all peaks under this threshold are discarded). Returns @@ -410,21 +410,21 @@ def compute_templates_diff( The sorting object templates_array : np.array The templates array (num_units, num_samples, num_channels).
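A usage sketch for the function documented above (the analyzer is assumed to already have the extensions this step relies on, e.g. templates and correlograms):

    from spikeinterface.curation import get_potential_auto_merge

    # pairs of unit ids that pass all the steps, with the documented defaults
    merge_unit_pairs = get_potential_auto_merge(analyzer, minimum_spikes=1000, corr_diff_thresh=0.16)

    # extra_outputs=True also returns the intermediate data behind each decision
    merge_unit_pairs, outs = get_potential_auto_merge(analyzer, extra_outputs=True)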
- num_channels: int, default: 5 + num_channels : int, default : 5 Number of channels to use for template similarity computation - num_shift: int, default: 5 + num_shift : int, default : 5 Number of shifts in samples to be explored for template similarity computation - pair_mask: None or boolean array + pair_mask : None or boolean array A bool matrix of size (num_units, num_units) to select which pair to compute. - template_metric: 'l1', 'l2' or 'cosine' + template_metric : 'l1', 'l2' or 'cosine' The metric to consider when measuring the distances between templates. Default is l1 - sparsity: None or ChannelSparsity + sparsity : None or ChannelSparsity Optionally a ChannelSparsity object. Returns ------- - templates_diff: np.array + templates_diff : np.array 2D array with template differences """ unit_ids = sorting.unit_ids diff --git a/src/spikeinterface/curation/curationsorting.py b/src/spikeinterface/curation/curationsorting.py index 1635a915fe..979b70679c 100644 --- a/src/spikeinterface/curation/curationsorting.py +++ b/src/spikeinterface/curation/curationsorting.py @@ -18,17 +18,17 @@ class CurationSorting: Parameters ---------- - parent_sorting: Recording + parent_sorting : Recording The recording object - properties_policy: "keep" | "remove", default: "keep" + properties_policy : "keep" | "remove", default : "keep" Policy used to propagate properties after split and merge operation. If "keep" the properties will be passed to the new units (if the original units have the same value). If "remove" the new units will have an empty value for all the properties - make_graph: bool + make_graph : bool True to keep a Networkx graph instance with the curation history Returns ------- - sorting: Sorting + sorting : Sorting Sorting object with the selected units merged """ @@ -67,14 +67,14 @@ def split(self, split_unit_id, indices_list, new_unit_ids=None): Parameters ---------- - split_unit_id: int or str + split_unit_id : int or str The unit to split - indices_list: list or np.array + indices_list : list or np.array A list of index arrays selecting the spikes to split in each segment. Each array can contain more than 2 indices (e.g. for splitting in 3 or more units) and it should be the same length as the spike train (for each segment). If the sorting has only one segment, indices_list can be a single array - new_unit_ids: list[str|int] ot None + new_unit_ids : list[str|int] or None List of new unit ids. If None, a new unit id is automatically selected """ current_sorting = self._sorting_stages[self._sorting_stages_i] @@ -108,11 +108,11 @@ def merge(self, units_to_merge, new_unit_id=None, delta_time_ms=0.4): Parameters ---------- - units_to_merge: list[str|int] + units_to_merge : list[str|int] List of unit ids to merge - new_unit_id: int or str + new_unit_id : int or str The new unit id. If None, a new unit id is automatically selected - delta_time_ms: float + delta_time_ms : float Number of ms to consider for duplicated spikes.
None won't check for duplications """ current_sorting = self._sorting_stages[self._sorting_stages_i] @@ -143,7 +143,7 @@ def remove_units(self, unit_ids): Parameters ---------- - unit_ids: list[str|int] + unit_ids : list[str|int] List of unit ids to remove """ current_sorting = self._sorting_stages[self._sorting_stages_i] @@ -174,7 +174,7 @@ def select_units(self, unit_ids, renamed_unit_ids=None): ---------- unit_ids : list[str|int] List of unit ids to select - renamed_unit_ids : list or None, default: None + renamed_unit_ids : list or None, default : None List of new unit ids to rename the selected units """ new_sorting = self._sorting_stages[self._sorting_stages_i].select_units(unit_ids, renamed_unit_ids) @@ -256,7 +256,7 @@ def draw_graph(self, **kwargs): Parameters ---------- - **kwargs: dict + **kwargs : dict Keyword arguments for Networkx draw function """ assert self._make_graph, "to make a graph use make_graph=True" diff --git a/src/spikeinterface/curation/mergeunitssorting.py b/src/spikeinterface/curation/mergeunitssorting.py index d32f3ef9b3..7c2c2ada33 100644 --- a/src/spikeinterface/curation/mergeunitssorting.py +++ b/src/spikeinterface/curation/mergeunitssorting.py @@ -12,23 +12,23 @@ class MergeUnitsSorting(BaseSorting): Parameters ---------- - parent_sorting: Recording + parent_sorting : Recording The sorting object - units_to_merge: list/tuple of lists/tuples + units_to_merge : list/tuple of lists/tuples A list of lists for every merge group. Each element needs to have at least two elements (two units to merge), but it can also have more (merge multiple units at once). - new_unit_ids: None or list + new_unit_ids : None or list A new unit_ids for merged units. If given, it needs to have the same length as `units_to_merge` - properties_policy: "keep" | "remove", default: "keep" + properties_policy : "keep" | "remove", default : "keep" Policy used to propagate properties. If "keep" the properties will be passed to the new units (if the units_to_merge have the same value). If "remove" the new units will have an empty value for all the properties of the new unit. - delta_time_ms: float or None + delta_time_ms : float or None Number of ms to consider for duplicated spikes. None won't check for duplications Returns ------- - sorting: Sorting + sorting : Sorting Sorting object with the selected units merged """ diff --git a/src/spikeinterface/curation/remove_duplicated_spikes.py b/src/spikeinterface/curation/remove_duplicated_spikes.py index 9e33f7bcb8..00d7b5d3c3 100644 --- a/src/spikeinterface/curation/remove_duplicated_spikes.py +++ b/src/spikeinterface/curation/remove_duplicated_spikes.py @@ -14,11 +14,11 @@ class RemoveDuplicatedSpikesSorting(BaseSorting): Parameters ---------- - sorting: BaseSorting + sorting : BaseSorting The parent sorting. - censored_period_ms: float + censored_period_ms : float The censored period to consider 2 spikes to be duplicated (in ms). - method: "keep_first" | "keep_last" | "keep_first_iterative" | "keep_last_iterative" | "random", default: "keep_first" + method : "keep_first" | "keep_last" | "keep_first_iterative" | "keep_last_iterative" | "random", default : "keep_first" Method used to remove the duplicated spikes. If method = "random", will randomly choose to remove the first or last spike. If method = "keep_first", for each ISI violation, will remove the second spike. 
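A sketch of the split/merge workflow documented above, following the `indices_list` description (one entry per spike, per segment, saying which new unit the spike goes to); the unit ids and the half/half split are arbitrary:

    import numpy as np
    from spikeinterface.curation import CurationSorting

    cs = CurationSorting(sorting, properties_policy="keep", make_graph=True)

    # split the first unit in two: a label array the same length as its spike train
    unit0 = sorting.unit_ids[0]
    labels = np.zeros(sorting.get_unit_spike_train(unit0, segment_index=0).size, dtype=int)
    labels[labels.size // 2 :] = 1
    cs.split(unit0, indices_list=[labels])

    # merge two units, checking duplicated spikes within 0.4 ms
    cs.merge(list(sorting.unit_ids[1:3]), delta_time_ms=0.4)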
@@ -30,7 +30,7 @@ class RemoveDuplicatedSpikesSorting(BaseSorting): Returns ------- - sorting_without_duplicated_spikes: Remove_DuplicatedSpikesSorting + sorting_without_duplicated_spikes : Remove_DuplicatedSpikesSorting The sorting without any duplicated spikes. """ diff --git a/src/spikeinterface/curation/remove_excess_spikes.py b/src/spikeinterface/curation/remove_excess_spikes.py index 6f5ca9c6c4..0ae7a59fc6 100644 --- a/src/spikeinterface/curation/remove_excess_spikes.py +++ b/src/spikeinterface/curation/remove_excess_spikes.py @@ -13,14 +13,14 @@ class RemoveExcessSpikesSorting(BaseSorting): Parameters ---------- - sorting: BaseSorting + sorting : BaseSorting The parent sorting. - recording: BaseRecording + recording : BaseRecording The recording to use to get the number of samples. Returns ------- - sorting_without_excess_spikes: RemoveExcessSpikesSorting + sorting_without_excess_spikes : RemoveExcessSpikesSorting The sorting without any excess spikes. """ @@ -92,14 +92,14 @@ def remove_excess_spikes(sorting, recording): Parameters ---------- - sorting: BaseSorting + sorting : BaseSorting The parent sorting. - recording: BaseRecording + recording : BaseRecording The recording to use to get the number of samples. Returns ------- - sorting_without_excess_spikes: Sorting + sorting_without_excess_spikes : Sorting The sorting without any excess spikes. """ if has_exceeding_spikes(recording=recording, sorting=sorting): diff --git a/src/spikeinterface/curation/remove_redundant.py b/src/spikeinterface/curation/remove_redundant.py index dd646c4a87..5ceac0d849 100644 --- a/src/spikeinterface/curation/remove_redundant.py +++ b/src/spikeinterface/curation/remove_redundant.py @@ -37,26 +37,26 @@ def remove_redundant_units( If SortingAnalyzer, the spike trains can be optionally realigned using the peak shift in the template to improve the matching procedure. If BaseSorting, the spike trains are not aligned. - align : bool, default: False + align : bool, default : False If True, spike trains are aligned (if a SortingAnalyzer is used) - delta_time : float, default: 0.4 + delta_time : float, default : 0.4 The time in ms to consider matching spikes - agreement_threshold : float, default: 0.2 + agreement_threshold : float, default : 0.2 Threshold on the agreement scores to flag possible redundant/duplicate units - duplicate_threshold : float, default: 0.8 + duplicate_threshold : float, default : 0.8 Final threshold on the portion of coincident events over the number of spikes above which the unit is removed - remove_strategy: "minimum_shift" | "highest_amplitude" | "max_spikes", default: "minimum_shift" + remove_strategy : "minimum_shift" | "highest_amplitude" | "max_spikes", default : "minimum_shift" Which strategy to remove one of the two duplicated units: - * "minimum_shift": keep the unit with best peak alignment (minimum shift) + * "minimum_shift" : keep the unit with best peak alignment (minimum shift) If shifts are equal then the "highest_amplitude" is used - * "highest_amplitude": keep the unit with the best amplitude on unshifted max. - * "max_spikes": keep the unit with more spikes + * "highest_amplitude" : keep the unit with the best amplitude on unshifted max. + * "max_spikes" : keep the unit with more spikes - peak_sign: "neg" | "pos" | "both", default: "neg" + peak_sign : "neg" | "pos" | "both", default : "neg" Used when remove_strategy="highest_amplitude" - extra_outputs: bool, default: False + extra_outputs : bool, default : False If True, will return the redundant pairs. 
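A sketch of the two call patterns described above (plain sorting vs SortingAnalyzer):

    from spikeinterface.curation import remove_redundant_units

    # from a plain sorting: no alignment, so use a strategy that needs no templates
    clean_sorting = remove_redundant_units(sorting, duplicate_threshold=0.8, remove_strategy="max_spikes")

    # from a SortingAnalyzer: templates are available, so peak alignment can be used
    clean_sorting = remove_redundant_units(analyzer, align=True, remove_strategy="minimum_shift")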
Returns @@ -149,11 +149,11 @@ def find_redundant_units(sorting, delta_time: float = 0.4, agreement_threshold=0 ---------- sorting : BaseSorting The input sorting object - delta_time : float, default: 0.4 + delta_time : float, default : 0.4 The time in ms to consider matching spikes - agreement_threshold : float, default: 0.2 + agreement_threshold : float, default : 0.2 Threshold on the agreement scores to flag possible redundant/duplicate units - duplicate_threshold : float, default: 0.8 + duplicate_threshold : float, default : 0.8 Final threshold on the portion of coincident events over the number of spikes above which the unit is flagged as duplicate/redundant diff --git a/src/spikeinterface/curation/splitunitsorting.py b/src/spikeinterface/curation/splitunitsorting.py index 5854d1b64a..4163d396e9 100644 --- a/src/spikeinterface/curation/splitunitsorting.py +++ b/src/spikeinterface/curation/splitunitsorting.py @@ -13,24 +13,24 @@ class SplitUnitSorting(BaseSorting): Parameters ---------- - parent_sorting: Recording + parent_sorting : Recording The recording object - parent_unit_id: int + parent_unit_id : int Unit id of the unit to split - indices_list: list or np.array + indices_list : list or np.array A list of index arrays selecting the spikes to split in each segment. Each array can contain more than 2 indices (e.g. for splitting in 3 or more units) and it should be the same length as the spike train (for each segment). If the sorting has only one segment, indices_list can be a single array - new_unit_ids: int + new_unit_ids : int Unit ids of the new units to be created - properties_policy: "keep" | "remove", default: "keep" + properties_policy : "keep" | "remove", default : "keep" Policy used to propagate properties. If "keep" the properties will be passed to the new units (if the units_to_merge have the same value). If "remove" the new units will have an empty value for all the properties of the new unit Returns ------- - sorting: Sorting + sorting : Sorting Sorting object with the selected units split """ diff --git a/src/spikeinterface/exporters/report.py b/src/spikeinterface/exporters/report.py index ae3badaa6d..349014c14a 100644 --- a/src/spikeinterface/exporters/report.py +++ b/src/spikeinterface/exporters/report.py @@ -28,19 +28,19 @@ def export_report( Parameters ---------- - sorting_analyzer: SortingAnalyzer + sorting_analyzer : SortingAnalyzer A SortingAnalyzer object - output_folder: str + output_folder : str The output folder where the report files are saved - remove_if_exists: bool, default: False + remove_if_exists : bool, default : False If True and the output folder exists, it is removed - format: str, default: "png" + format : str, default : "png" The output figure format (any format handled by matplotlib) - peak_sign: "neg" or "pos", default: "neg" + peak_sign : "neg" or "pos", default : "neg" used to compute amplitudes and metrics - show_figures: bool, default: False + show_figures : bool, default : False If True, figures are shown. 
If False, figures are closed after saving - force_computation: bool, default: False + force_computation : bool, default : False Force or not some heavy computaion before exporting {} """ diff --git a/src/spikeinterface/exporters/to_phy.py b/src/spikeinterface/exporters/to_phy.py index a4fd9bd2cb..fbdb91ca7e 100644 --- a/src/spikeinterface/exporters/to_phy.py +++ b/src/spikeinterface/exporters/to_phy.py @@ -43,29 +43,29 @@ def export_to_phy( Parameters ---------- - sorting_analyzer: SortingAnalyzer + sorting_analyzer : SortingAnalyzer A SortingAnalyzer object - output_folder: str | Path + output_folder : str | Path The output folder where the phy template-gui files are saved - compute_pc_features: bool, default: True + compute_pc_features : bool, default : True If True, pc features are computed - compute_amplitudes: bool, default: True + compute_amplitudes : bool, default : True If True, waveforms amplitudes are computed - sparsity: ChannelSparsity or None, default: None + sparsity : ChannelSparsity or None, default : None The sparsity object - copy_binary: bool, default: True + copy_binary : bool, default : True If True, the recording is copied and saved in the phy "output_folder" - remove_if_exists: bool, default: False + remove_if_exists : bool, default : False If True and "output_folder" exists, it is removed and overwritten - peak_sign: "neg" | "pos" | "both", default: "neg" + peak_sign : "neg" | "pos" | "both", default : "neg" Used by compute_spike_amplitudes - template_mode: str, default: "average" + template_mode : str, default : "average" Parameter "mode" to be given to SortingAnalyzer.get_template() - dtype: dtype or None, default: None + dtype : dtype or None, default : None Dtype to save binary data - verbose: bool, default: True + verbose : bool, default : True If True, output is verbose - use_relative_path : bool, default: False + use_relative_path : bool, default : False If True and `copy_binary=True` saves the binary file `dat_path` in the `params.py` relative to `output_folder` (ie `dat_path=r"recording.dat"`). If `copy_binary=False`, then uses a path relative to the `output_folder` If False, uses an absolute path in the `params.py` (ie `dat_path=r"path/to/the/recording.dat"`) {} diff --git a/src/spikeinterface/extractors/cbin_ibl.py b/src/spikeinterface/extractors/cbin_ibl.py index 2d81444a99..76c319a741 100644 --- a/src/spikeinterface/extractors/cbin_ibl.py +++ b/src/spikeinterface/extractors/cbin_ibl.py @@ -22,12 +22,12 @@ class CompressedBinaryIblExtractor(BaseRecording): Parameters ---------- - folder_path: str or Path + folder_path : str or Path Path to ibl folder. - load_sync_channel: bool, default: False + load_sync_channel : bool, default : False Load or not the last channel (sync). If not then the probe is loaded. - stream_name: str, default: "ap". + stream_name : str, default : "ap". Whether to load AP or LFP band, one of "ap" or "lp". diff --git a/src/spikeinterface/extractors/herdingspikesextractors.py b/src/spikeinterface/extractors/herdingspikesextractors.py index 139d51d62e..7749968257 100644 --- a/src/spikeinterface/extractors/herdingspikesextractors.py +++ b/src/spikeinterface/extractors/herdingspikesextractors.py @@ -22,7 +22,7 @@ class HerdingspikesSortingExtractor(BaseSorting): ---------- folder_path : str or Path Path to the ALF folder. - load_unit_info : bool, default: True + load_unit_info : bool, default : True Whether to load the unit info from the file. 
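For reference, the two exporters documented above in one sketch (the output folder names are placeholders, and the analyzer is assumed to have the required extensions computed):

    from spikeinterface.exporters import export_report, export_to_phy

    export_to_phy(analyzer, output_folder="phy_folder", copy_binary=True, remove_if_exists=False)
    export_report(analyzer, output_folder="report_folder", format="png", show_figures=False)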
Returns diff --git a/src/spikeinterface/extractors/klustaextractors.py b/src/spikeinterface/extractors/klustaextractors.py index 896b0cf4f5..3b6685c30c 100644 --- a/src/spikeinterface/extractors/klustaextractors.py +++ b/src/spikeinterface/extractors/klustaextractors.py @@ -34,7 +34,7 @@ class KlustaSortingExtractor(BaseSorting): ---------- file_or_folder_path : str or Path Path to the ALF folder. - exclude_cluster_groups: list or str, default: None + exclude_cluster_groups : list or str, default : None Cluster groups to exclude (e.g. "noise" or ["noise", "mua"]). Returns diff --git a/src/spikeinterface/extractors/mdaextractors.py b/src/spikeinterface/extractors/mdaextractors.py index d2848f03d7..7fa089d5b6 100644 --- a/src/spikeinterface/extractors/mdaextractors.py +++ b/src/spikeinterface/extractors/mdaextractors.py @@ -23,11 +23,11 @@ class MdaRecordingExtractor(BaseRecording): ---------- folder_path : str or Path Path to the MDA folder. - raw_fname: str, default: "raw.mda" + raw_fname : str, default : "raw.mda" File name of raw file - params_fname: str, default: "params.json" + params_fname : str, default : "params.json" File name of params file - geom_fname: str, default: "geom.csv" + geom_fname : str, default : "geom.csv" File name of geom file Returns @@ -82,20 +82,20 @@ def write_recording( Parameters ---------- - recording: RecordingExtractor + recording : RecordingExtractor The recording extractor to be saved. - save_path: str or Path + save_path : str or Path The folder to save the Mda files. - params: dictionary + params : dictionary Dictionary with optional parameters to save metadata. Sampling frequency is appended to this dictionary. - raw_fname: str, default: "raw.mda" + raw_fname : str, default : "raw.mda" File name of raw file - params_fname: str, default: "params.json" + params_fname : str, default : "params.json" File name of params file - geom_fname: str, default: "geom.csv" + geom_fname : str, default : "geom.csv" File name of geom file - dtype: dtype or None, default: None + dtype : dtype or None, default : None Data type to be used. If None dtype is same as recording traces. **job_kwargs: Use by job_tools modules to set: @@ -156,7 +156,7 @@ def get_num_samples(self): """Returns the number of samples in this signal block Returns: - SampleIndex: Number of samples in the signal block + SampleIndex : Number of samples in the signal block """ return self._num_samples diff --git a/src/spikeinterface/extractors/neoextractors/alphaomega.py b/src/spikeinterface/extractors/neoextractors/alphaomega.py index e2e66f9e66..1e0876ab3e 100644 --- a/src/spikeinterface/extractors/neoextractors/alphaomega.py +++ b/src/spikeinterface/extractors/neoextractors/alphaomega.py @@ -15,15 +15,15 @@ class AlphaOmegaRecordingExtractor(NeoBaseRecordingExtractor): Parameters ---------- - folder_path: str or Path-like + folder_path : str or Path-like The folder path to the AlphaOmega recordings. - lsx_files: list of strings or None, default: None + lsx_files : list of strings or None, default : None A list of listings files that refers to mpx files to load. - stream_id: {"RAW", "LFP", "SPK", "ACC", "AI", "UD"}, default: "RAW" + stream_id : {"RAW", "LFP", "SPK", "ACC", "AI", "UD"}, default : "RAW" If there are several streams, specify the stream id you want to load. - stream_name: str, default: None + stream_name : str, default : None If there are several streams, specify the stream name you want to load. 
- all_annotations: bool, default: False + all_annotations : bool, default : False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/axona.py b/src/spikeinterface/extractors/neoextractors/axona.py index a9a36d44bb..455f3ca2d6 100644 --- a/src/spikeinterface/extractors/neoextractors/axona.py +++ b/src/spikeinterface/extractors/neoextractors/axona.py @@ -15,9 +15,9 @@ class AxonaRecordingExtractor(NeoBaseRecordingExtractor): Parameters ---------- - file_path: str + file_path : str The file path to load the recordings from. - all_annotations: bool, default: False + all_annotations : bool, default : False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/biocam.py b/src/spikeinterface/extractors/neoextractors/biocam.py index ba9151e4b3..f724834db5 100644 --- a/src/spikeinterface/extractors/neoextractors/biocam.py +++ b/src/spikeinterface/extractors/neoextractors/biocam.py @@ -17,17 +17,17 @@ class BiocamRecordingExtractor(NeoBaseRecordingExtractor): Parameters ---------- - file_path: str + file_path : str The file path to load the recordings from. - mea_pitch: float, default: None + mea_pitch : float, default : None The inter-electrode distance (pitch) between electrodes. - electrode_width: float, default: None + electrode_width : float, default : None Width of the electrodes in um. - stream_id: str, default: None + stream_id : str, default : None If there are several streams, specify the stream id you want to load. - stream_name: str, default: None + stream_name : str, default : None If there are several streams, specify the stream name you want to load. - all_annotations: bool, default: False + all_annotations : bool, default : False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/blackrock.py b/src/spikeinterface/extractors/neoextractors/blackrock.py index 6f899dc364..ac50805e25 100644 --- a/src/spikeinterface/extractors/neoextractors/blackrock.py +++ b/src/spikeinterface/extractors/neoextractors/blackrock.py @@ -19,13 +19,13 @@ class BlackrockRecordingExtractor(NeoBaseRecordingExtractor): Parameters ---------- - file_path: str + file_path : str The file path to load the recordings from. - stream_id: str, default: None + stream_id : str, default : None If there are several streams, specify the stream id you want to load. - stream_name: str, default: None + stream_name : str, default : None If there are several streams, specify the stream name you want to load. - all_annotations: bool, default: False + all_annotations : bool, default : False Load exhaustively all annotations from neo. """ @@ -75,15 +75,15 @@ class BlackrockSortingExtractor(NeoBaseSortingExtractor): Parameters ---------- - file_path: str + file_path : str The file path to load the recordings from - sampling_frequency: float, default: None + sampling_frequency : float, default : None The sampling frequency for the sorting extractor. When the signal data is available (.ncs) those files will be used to extract the frequency automatically. Otherwise, the sampling frequency needs to be specified for this extractor to be initialized - stream_id: str, default: None + stream_id : str, default : None Used to extract information about the sampling frequency and t_start from the analog signal if provided. 
- stream_name: str, default: None + stream_name : str, default : None Used to extract information about the sampling frequency and t_start from the analog signal if provided. """ diff --git a/src/spikeinterface/extractors/neoextractors/ced.py b/src/spikeinterface/extractors/neoextractors/ced.py index ca21783dc0..ca090e596e 100644 --- a/src/spikeinterface/extractors/neoextractors/ced.py +++ b/src/spikeinterface/extractors/neoextractors/ced.py @@ -17,15 +17,15 @@ class CedRecordingExtractor(NeoBaseRecordingExtractor): Parameters ---------- - file_path: str + file_path : str The file path to the smr or smrx file. - stream_id: str, default: None + stream_id : str, default : None If there are several streams, specify the stream id you want to load. - stream_name: str, default: None + stream_name : str, default : None If there are several streams, specify the stream name you want to load. - block_index: int, default: None + block_index : int, default : None If there are several blocks, specify the block index you want to load. - all_annotations: bool, default: False + all_annotations : bool, default : False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/intan.py b/src/spikeinterface/extractors/neoextractors/intan.py index e2746cc2bc..421e676679 100644 --- a/src/spikeinterface/extractors/neoextractors/intan.py +++ b/src/spikeinterface/extractors/neoextractors/intan.py @@ -15,13 +15,13 @@ class IntanRecordingExtractor(NeoBaseRecordingExtractor): Parameters ---------- - file_path: str + file_path : str The file path to load the recordings from. - stream_id: str, default: None + stream_id : str, default : None If there are several streams, specify the stream id you want to load. - stream_name: str, default: None + stream_name : str, default : None If there are several streams, specify the stream name you want to load. - all_annotations: bool, default: False + all_annotations : bool, default : False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/maxwell.py b/src/spikeinterface/extractors/neoextractors/maxwell.py index b5034a0403..5ca353cd9d 100644 --- a/src/spikeinterface/extractors/neoextractors/maxwell.py +++ b/src/spikeinterface/extractors/neoextractors/maxwell.py @@ -20,20 +20,20 @@ class MaxwellRecordingExtractor(NeoBaseRecordingExtractor): Parameters ---------- - file_path: str + file_path : str The file path to the maxwell h5 file. - stream_id: str, default: None + stream_id : str, default : None If there are several streams, specify the stream id you want to load. For MaxTwo when there are several wells at the same time you need to specify stream_id='well000' or 'well0001', etc. - stream_name: str, default: None + stream_name : str, default : None If there are several streams, specify the stream name you want to load. - all_annotations: bool, default: False + all_annotations : bool, default : False Load exhaustively all annotations from neo. - rec_name: str, default: None + rec_name : str, default : None When the file contains several recordings you need to specify the one you want to extract. (rec_name='rec0000'). - install_maxwell_plugin: bool, default: False + install_maxwell_plugin : bool, default : False If True, install the maxwell plugin for neo. 
""" diff --git a/src/spikeinterface/extractors/neoextractors/mcsraw.py b/src/spikeinterface/extractors/neoextractors/mcsraw.py index bb8561daae..2220ef41c8 100644 --- a/src/spikeinterface/extractors/neoextractors/mcsraw.py +++ b/src/spikeinterface/extractors/neoextractors/mcsraw.py @@ -18,15 +18,15 @@ class MCSRawRecordingExtractor(NeoBaseRecordingExtractor): Parameters ---------- - file_path: str + file_path : str The file path to load the recordings from. - stream_id: str, default: None + stream_id : str, default : None If there are several streams, specify the stream id you want to load. - stream_name: str, default: None + stream_name : str, default : None If there are several streams, specify the stream name you want to load. - block_index: int, default: None + block_index : int, default : None If there are several blocks, specify the block index you want to load. - all_annotations: bool, default: False + all_annotations : bool, default : False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/mearec.py b/src/spikeinterface/extractors/neoextractors/mearec.py index b7735c01d7..21e51eb0fd 100644 --- a/src/spikeinterface/extractors/neoextractors/mearec.py +++ b/src/spikeinterface/extractors/neoextractors/mearec.py @@ -34,9 +34,9 @@ class MEArecRecordingExtractor(NeoBaseRecordingExtractor): Parameters ---------- - file_path: str + file_path : str The file path to load the recordings from. - all_annotations: bool, default: False + all_annotations : bool, default : False Load exhaustively all annotations from neo. """ @@ -120,14 +120,14 @@ def read_mearec(file_path): Parameters ---------- - file_path: str or Path + file_path : str or Path Path to MEArec h5 file Returns ------- - recording: MEArecRecordingExtractor + recording : MEArecRecordingExtractor The recording extractor object - sorting: MEArecSortingExtractor + sorting : MEArecSortingExtractor The sorting extractor object """ recording = MEArecRecordingExtractor(file_path) diff --git a/src/spikeinterface/extractors/neoextractors/neuralynx.py b/src/spikeinterface/extractors/neoextractors/neuralynx.py index cc89302e59..fa35b0396e 100644 --- a/src/spikeinterface/extractors/neoextractors/neuralynx.py +++ b/src/spikeinterface/extractors/neoextractors/neuralynx.py @@ -18,18 +18,18 @@ class NeuralynxRecordingExtractor(NeoBaseRecordingExtractor): Parameters ---------- - folder_path: str + folder_path : str The file path to load the recordings from. - stream_id: str, default: None + stream_id : str, default : None If there are several streams, specify the stream id you want to load. - stream_name: str, default: None + stream_name : str, default : None If there are several streams, specify the stream name you want to load. - all_annotations: bool, default: False + all_annotations : bool, default : False Load exhaustively all annotations from neo. - exlude_filename: list[str], default: None + exlude_filename : list[str], default : None List of filename to exclude from the loading. For example, use `exclude_filename=["events.nev"]` to skip loading the event file. - strict_gap_mode: bool, default: False + strict_gap_mode : bool, default : False See neo documentation. Detect gaps using strict mode or not. * strict_gap_mode = True then a gap is consider when timstamp difference between two @@ -78,14 +78,14 @@ class NeuralynxSortingExtractor(NeoBaseSortingExtractor): Parameters ---------- - folder_path: str + folder_path : str The file path to load the recordings from. 
- sampling_frequency: float + sampling_frequency : float The sampling frequency for the spiking channels. When the signal data is available (.ncs) those files will be used to extract the frequency. Otherwise, the sampling frequency needs to be specified for this extractor. - stream_id: str, default: None + stream_id : str, default : None Used to extract information about the sampling frequency and t_start from the analog signal if provided. - stream_name: str, default: None + stream_name : str, default : None Used to extract information about the sampling frequency and t_start from the analog signal if provided. """ diff --git a/src/spikeinterface/extractors/neoextractors/neuroexplorer.py b/src/spikeinterface/extractors/neoextractors/neuroexplorer.py index 7e47788d3c..18f0dfcf5f 100644 --- a/src/spikeinterface/extractors/neoextractors/neuroexplorer.py +++ b/src/spikeinterface/extractors/neoextractors/neuroexplorer.py @@ -36,14 +36,14 @@ class NeuroExplorerRecordingExtractor(NeoBaseRecordingExtractor): Parameters ---------- - file_path: str + file_path : str The file path to load the recordings from. - stream_id: str, default: None + stream_id : str, default : None If there are several streams, specify the stream id you want to load. For this neo reader streams are defined by their sampling frequency. - stream_name: str, default: None + stream_name : str, default : None If there are several streams, specify the stream name you want to load. - all_annotations: bool, default: False + all_annotations : bool, default : False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/neuroscope.py b/src/spikeinterface/extractors/neoextractors/neuroscope.py index e23ab70f7a..b386122266 100644 --- a/src/spikeinterface/extractors/neoextractors/neuroscope.py +++ b/src/spikeinterface/extractors/neoextractors/neuroscope.py @@ -25,15 +25,15 @@ class NeuroScopeRecordingExtractor(NeoBaseRecordingExtractor): Parameters ---------- - file_path: str + file_path : str The file path to the binary container usually a .dat, .lfp, .eeg extension. - xml_file_path: str, default: None + xml_file_path : str, default : None The path to the xml file. If None, the xml file is assumed to have the same name as the binary file. - stream_id: str, default: None + stream_id : str, default : None If there are several streams, specify the stream id you want to load. - stream_name: str, default: None + stream_name : str, default : None If there are several streams, specify the stream name you want to load. - all_annotations: bool, default: False + all_annotations : bool, default : False Load exhaustively all annotations from neo. """ @@ -94,12 +94,12 @@ class NeuroScopeSortingExtractor(BaseSorting): clufile_path : PathType Optional. Path to a particular .clu text file. If given, only the single .clu file (and the respective .res file) are loaded - keep_mua_units : bool, default: True + keep_mua_units : bool, default : True Optional. Whether or not to return sorted spikes from multi-unit activity exclude_shanks : list Optional. List of indices to ignore. The set of all possible indices is chosen by default, extracted as the final integer of all the .res.%i and .clu.%i pairs. - xml_file_path : PathType, default: None + xml_file_path : PathType, default : None Path to the .xml file referenced by this sorting. """ @@ -302,18 +302,18 @@ def read_neuroscope( Parameters ---------- - file_path: str + file_path : str The xml file. 
- stream_id: str or None + stream_id : str or None The stream id to load. If None, the first stream is loaded - keep_mua_units: bool, default: False + keep_mua_units : bool, default : False Optional. Whether or not to return sorted spikes from multi-unit activity - exclude_shanks: list + exclude_shanks : list Optional. List of indices to ignore. The set of all possible indices is chosen by default, extracted as the final integer of all the .res. % i and .clu. % i pairs. - load_recording: bool, default: True + load_recording : bool, default : True If True, the recording is loaded - load_sorting: bool, default: False + load_sorting : bool, default : False If True, the sorting is loaded """ outputs = () diff --git a/src/spikeinterface/extractors/neoextractors/nix.py b/src/spikeinterface/extractors/neoextractors/nix.py index 1fef915a36..31cce32a41 100644 --- a/src/spikeinterface/extractors/neoextractors/nix.py +++ b/src/spikeinterface/extractors/neoextractors/nix.py @@ -15,15 +15,15 @@ class NixRecordingExtractor(NeoBaseRecordingExtractor): Parameters ---------- - file_path: str + file_path : str The file path to load the recordings from. - stream_id: str, default: None + stream_id : str, default : None If there are several streams, specify the stream id you want to load. - stream_name: str, default: None + stream_name : str, default : None If there are several streams, specify the stream name you want to load. - block_index: int, default: None + block_index : int, default : None If there are several blocks, specify the block index you want to load. - all_annotations: bool, default: False + all_annotations : bool, default : False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/openephys.py b/src/spikeinterface/extractors/neoextractors/openephys.py index 419bb81451..80ff098a00 100644 --- a/src/spikeinterface/extractors/neoextractors/openephys.py +++ b/src/spikeinterface/extractors/neoextractors/openephys.py @@ -49,17 +49,17 @@ class OpenEphysLegacyRecordingExtractor(NeoBaseRecordingExtractor): Parameters ---------- - folder_path: str + folder_path : str The folder path to load the recordings from - stream_id: str, default: None + stream_id : str, default : None If there are several streams, specify the stream id you want to load - stream_name: str, default: None + stream_name : str, default : None If there are several streams, specify the stream name you want to load - block_index: int, default: None + block_index : int, default : None If there are several blocks (experiments), specify the block index you want to load - all_annotations: bool, default: False + all_annotations : bool, default : False Load exhaustively all annotation from neo - ignore_timestamps_errors: None + ignore_timestamps_errors : None Deprecated keyword argument. This is now ignored. neo.OpenEphysRawIO is now handling gaps directly but makes the read slower. """ @@ -114,26 +114,26 @@ class OpenEphysBinaryRecordingExtractor(NeoBaseRecordingExtractor): Parameters ---------- - folder_path: str + folder_path : str The folder path to the root folder (containing the record node folders) - load_sync_channel : bool, default: False + load_sync_channel : bool, default : False If False (default) and a SYNC channel is present (e.g. Neuropixels), this is not loaded If True, the SYNC channel is loaded and can be accessed in the analog signals. 
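A sketch of the `read_neuroscope` convenience function documented above ("session.xml" is a placeholder path):

    from spikeinterface.extractors import read_neuroscope

    # default: recording only
    recording = read_neuroscope("session.xml")

    # recording and sorting together (returned as a tuple), discarding MUA clusters
    recording, sorting = read_neuroscope("session.xml", load_recording=True, load_sorting=True, keep_mua_units=False)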
- load_sync_timestamps : bool, default: False + load_sync_timestamps : bool, default : False If True, the synchronized_timestamps are loaded and set as times to the recording. If False (default), only the t_start and sampling rate are set, and timestamps are assumed to be uniform and linearly increasing - experiment_names: str, list, or None, default: None + experiment_names : str, list, or None, default : None If multiple experiments are available, this argument allows users to select one or more experiments. If None, all experiements are loaded as blocks. E.g. `experiment_names="experiment2"`, `experiment_names=["experiment1", "experiment2"]` - stream_id: str, default: None + stream_id : str, default : None If there are several streams, specify the stream id you want to load - stream_name: str, default: None + stream_name : str, default : None If there are several streams, specify the stream name you want to load - block_index: int, default: None + block_index : int, default : None If there are several blocks (experiments), specify the block index you want to load - all_annotations: bool, default: False + all_annotations : bool, default : False Load exhaustively all annotation from neo """ @@ -283,7 +283,7 @@ class OpenEphysBinaryEventExtractor(NeoBaseEventExtractor): Parameters ---------- - folder_path: str + folder_path : str """ @@ -307,38 +307,38 @@ def read_openephys(folder_path, **kwargs): Parameters ---------- - folder_path: str or Path + folder_path : str or Path Path to openephys folder - stream_id: str, default: None + stream_id : str, default : None If there are several streams, specify the stream id you want to load - stream_name: str, default: None + stream_name : str, default : None If there are several streams, specify the stream name you want to load - block_index: int, default: None + block_index : int, default : None If there are several blocks (experiments), specify the block index you want to load - all_annotations: bool, default: False + all_annotations : bool, default : False Load exhaustively all annotation from neo - load_sync_channel : bool, default: False + load_sync_channel : bool, default : False If False (default) and a SYNC channel is present (e.g. Neuropixels), this is not loaded. If True, the SYNC channel is loaded and can be accessed in the analog signals. For open ephsy binary format only - load_sync_timestamps : bool, default: False + load_sync_timestamps : bool, default : False If True, the synchronized_timestamps are loaded and set as times to the recording. If False (default), only the t_start and sampling rate are set, and timestamps are assumed to be uniform and linearly increasing. For open ephsy binary format only - experiment_names: str, list, or None, default: None + experiment_names : str, list, or None, default : None If multiple experiments are available, this argument allows users to select one or more experiments. If None, all experiements are loaded as blocks. E.g. 
- ignore_timestamps_errors: bool, default: False
+ ignore_timestamps_errors : bool, default : False
 Ignore the discontinuous timestamps errors in neo
 For open ephys legacy format only

 Returns
 -------
- recording: OpenEphysLegacyRecordingExtractor or OpenEphysBinaryExtractor
+ recording : OpenEphysLegacyRecordingExtractor or OpenEphysBinaryExtractor
 """
 # auto guess format
 files = [f for f in Path(folder_path).iterdir()]
@@ -357,14 +357,14 @@ def read_openephys_event(folder_path, block_index=None):

 Parameters
 ----------
- folder_path: str or Path
+ folder_path : str or Path
 Path to openephys folder
- block_index: int, default: None
+ block_index : int, default : None
 If there are several blocks (experiments), specify the block index you want to load.

 Returns
 -------
- event: OpenEphysBinaryEventExtractor
+ event : OpenEphysBinaryEventExtractor
 """
 # auto guess format
 files = [str(f) for f in Path(folder_path).iterdir()]
diff --git a/src/spikeinterface/extractors/neoextractors/plexon.py b/src/spikeinterface/extractors/neoextractors/plexon.py
index a38f041b0d..08f2f8afe8 100644
--- a/src/spikeinterface/extractors/neoextractors/plexon.py
+++ b/src/spikeinterface/extractors/neoextractors/plexon.py
@@ -15,13 +15,13 @@ class PlexonRecordingExtractor(NeoBaseRecordingExtractor):

 Parameters
 ----------
- file_path: str
+ file_path : str
 The file path to load the recordings from.
- stream_id: str, default: None
+ stream_id : str, default : None
 If there are several streams, specify the stream id you want to load.
- stream_name: str, default: None
+ stream_name : str, default : None
 If there are several streams, specify the stream name you want to load.
- all_annotations: bool, default: False
+ all_annotations : bool, default : False
 Load exhaustively all annotations from neo.
 """

@@ -50,7 +50,7 @@ class PlexonSortingExtractor(NeoBaseSortingExtractor):

 Parameters
 ----------
- file_path: str
+ file_path : str
 The file path to load the recordings from.
 """

diff --git a/src/spikeinterface/extractors/neoextractors/plexon2.py b/src/spikeinterface/extractors/neoextractors/plexon2.py
index 01d0019959..fc325d14e7 100644
--- a/src/spikeinterface/extractors/neoextractors/plexon2.py
+++ b/src/spikeinterface/extractors/neoextractors/plexon2.py
@@ -13,13 +13,13 @@ class Plexon2RecordingExtractor(NeoBaseRecordingExtractor):

 Parameters
 ----------
- file_path: str
+ file_path : str
 The file path to load the recordings from.
- stream_id: str, default: None
+ stream_id : str, default : None
 If there are several streams, specify the stream id you want to load.
- stream_name: str, default: None
+ stream_name : str, default : None
 If there are several streams, specify the stream name you want to load.
- all_annotations: bool, default: False
+ all_annotations : bool, default : False
 Load exhaustively all annotations from neo.
 """

@@ -48,9 +48,9 @@ class Plexon2SortingExtractor(NeoBaseSortingExtractor):

 Parameters
 ----------
- file_path: str
+ file_path : str
 The file path to load the recordings from.
- sampling_frequency: float, default: None
+ sampling_frequency : float, default : None
 The sampling frequency of the sorting
 (required for multiple streams with different sampling frequencies).
""" @@ -82,7 +82,7 @@ class Plexon2EventExtractor(NeoBaseEventExtractor): Parameters ---------- - folder_path: str + folder_path : str """ diff --git a/src/spikeinterface/extractors/neoextractors/spike2.py b/src/spikeinterface/extractors/neoextractors/spike2.py index b54917026c..9f22a57ed1 100644 --- a/src/spikeinterface/extractors/neoextractors/spike2.py +++ b/src/spikeinterface/extractors/neoextractors/spike2.py @@ -16,13 +16,13 @@ class Spike2RecordingExtractor(NeoBaseRecordingExtractor): Parameters ---------- - file_path: str + file_path : str The file path to load the recordings from. - stream_id: str, default: None + stream_id : str, default : None If there are several streams, specify the stream id you want to load. - stream_name: str, default: None + stream_name : str, default : None If there are several streams, specify the stream name you want to load. - all_annotations: bool, default: False + all_annotations : bool, default : False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/spikegadgets.py b/src/spikeinterface/extractors/neoextractors/spikegadgets.py index 7d6b492325..4bcf5eb1e9 100644 --- a/src/spikeinterface/extractors/neoextractors/spikegadgets.py +++ b/src/spikeinterface/extractors/neoextractors/spikegadgets.py @@ -18,13 +18,13 @@ class SpikeGadgetsRecordingExtractor(NeoBaseRecordingExtractor): Parameters ---------- - file_path: str + file_path : str The file path to load the recordings from. - stream_id: str or None, default: None + stream_id : str or None, default : None If there are several streams, specify the stream id you want to load. - stream_name: str or None, default: None + stream_name : str or None, default : None If there are several streams, specify the stream name you want to load. - all_annotations: bool, default: False + all_annotations : bool, default : False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/spikeglx.py b/src/spikeinterface/extractors/neoextractors/spikeglx.py index e646caaafd..4d594c548a 100644 --- a/src/spikeinterface/extractors/neoextractors/spikeglx.py +++ b/src/spikeinterface/extractors/neoextractors/spikeglx.py @@ -30,17 +30,17 @@ class SpikeGLXRecordingExtractor(NeoBaseRecordingExtractor): Parameters ---------- - folder_path: str + folder_path : str The folder path to load the recordings from. - load_sync_channel: bool default: False + load_sync_channel : bool default : False Whether or not to load the last channel in the stream, which is typically used for synchronization. If True, then the probe is not loaded. - stream_id: str or None, default: None + stream_id : str or None, default : None If there are several streams, specify the stream id you want to load. For example, "imec0.ap", "nidq", or "imec0.lf". - stream_name: str or None, default: None + stream_name : str or None, default : None If there are several streams, specify the stream name you want to load. - all_annotations: bool, default: False + all_annotations : bool, default : False Load exhaustively all annotations from neo. 
""" diff --git a/src/spikeinterface/extractors/neoextractors/tdt.py b/src/spikeinterface/extractors/neoextractors/tdt.py index 18280df925..3834f14d4b 100644 --- a/src/spikeinterface/extractors/neoextractors/tdt.py +++ b/src/spikeinterface/extractors/neoextractors/tdt.py @@ -15,13 +15,13 @@ class TdtRecordingExtractor(NeoBaseRecordingExtractor): Parameters ---------- - folder_path: str + folder_path : str The folder path to the tdt folder. - stream_id: str or None, default: None + stream_id : str or None, default : None If there are several streams, specify the stream id you want to load. - stream_name: str or None, default: None + stream_name : str or None, default : None If there are several streams, specify the stream name you want to load. - all_annotations: bool, default: False + all_annotations : bool, default : False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/nwbextractors.py b/src/spikeinterface/extractors/nwbextractors.py index 2aa34533a6..ace2411482 100644 --- a/src/spikeinterface/extractors/nwbextractors.py +++ b/src/spikeinterface/extractors/nwbextractors.py @@ -111,12 +111,12 @@ def read_nwbfile( The path to the NWB file. Either provide this or file. file : file-like object or None The file-like object to read from. Either provide this or file_path. - stream_mode : "fsspec" | "remfile" | None, default: None + stream_mode : "fsspec" | "remfile" | None, default : None The streaming mode to use. If None it assumes the file is on the local disk. - cache: bool, default: False + cache : bool, default : False If True, the file is cached in the file passed to stream_cache_path if False, the file is not cached. - stream_cache_path : str or None, default: None + stream_cache_path : str or None, default : None The path to the cache storage, when default to None it uses the a temporary folder. Returns @@ -170,7 +170,7 @@ def _retrieve_electrical_series_pynwb( ---------- nwbfile : NWBFile The NWBFile object from which to extract the ElectricalSeries. - electrical_series_path : str, default: None + electrical_series_path : str, default : None The name of the ElectricalSeries to extract. If not specified, it will return the first found ElectricalSeries if there's only one; otherwise, it raises an error. @@ -224,7 +224,7 @@ def _retrieve_unit_table_pynwb(nwbfile: "NWBFile", unit_table_path: Optional[str ---------- nwbfile : NWBFile The NWBFile object from which to extract the Units. - unit_table_path : str, default: None + unit_table_path : str, default : None The path of the Units to extract. If not specified, it will return the first found Units if there's only one; otherwise, it raises an error. @@ -406,12 +406,12 @@ class NwbRecordingExtractor(BaseRecording): Parameters ---------- - file_path: str, Path, or None + file_path : str, Path, or None Path to the NWB file or an s3 URL. Use this parameter to specify the file location if not using the `file` parameter. - electrical_series_name: str or None, default: None + electrical_series_name : str or None, default : None Deprecated, use `electrical_series_path` instead. - electrical_series_path: str or None, default: None + electrical_series_path : str or None, default : None The name of the ElectricalSeries object within the NWB file. This parameter is crucial when the NWB file contains multiple ElectricalSeries objects. It helps in identifying which specific series to extract data from. 
If there is only one ElectricalSeries and @@ -419,30 +419,30 @@ class NwbRecordingExtractor(BaseRecording): If multiple ElectricalSeries are present and this parameter is not set, an error is raised. The `electrical_series_path` corresponds to the path within the NWB file, e.g., 'acquisition/MyElectricalSeries`. - load_time_vector: bool, default: False + load_time_vector : bool, default : False If set to True, the time vector is also loaded into the recording object. Useful for cases where precise timing information is required. - samples_for_rate_estimation: int, default: 1000 + samples_for_rate_estimation : int, default : 1000 The number of timestamp samples used for estimating the sampling rate. This is relevant when the 'rate' attribute is not available in the ElectricalSeries. - stream_mode : "fsspec" | "remfile" | "zarr" | None, default: None + stream_mode : "fsspec" | "remfile" | "zarr" | None, default : None Determines the streaming mode for reading the file. Use this for optimized reading from different sources, such as local disk or remote servers. - load_channel_properties: bool, default: True + load_channel_properties : bool, default : True If True, all the channel properties are loaded from the NWB file and stored as properties. For streaming purposes, it can be useful to set this to False to speed up streaming. - file: file-like object or None, default: None + file : file-like object or None, default : None A file-like object representing the NWB file. Use this parameter if you have an in-memory representation of the NWB file instead of a file path. - cache: bool, default: False + cache : bool, default : False Indicates whether to cache the file locally when using streaming. Caching can improve performance for remote files. - stream_cache_path: str, Path, or None, default: None + stream_cache_path : str, Path, or None, default : None Specifies the local path for caching the file. Relevant only if `cache` is True. - storage_options: dict | None = None, + storage_options : dict | None = None, These are the additional kwargs (e.g. AWS credentials) that are passed to the zarr.open convenience function. This is only used on the "zarr" stream_mode. - use_pynwb: bool, default: False + use_pynwb : bool, default : False Uses the pynwb library to read the NWB file. Setting this to False, the default, uses h5py to read the file. Using h5py can improve performance by bypassing some of the PyNWB validations. @@ -876,7 +876,7 @@ def fetch_available_electrical_series_paths( stream_mode : "fsspec" | "remfile" | "zarr" | None, optional Determines the streaming mode for reading the file. Use this for optimized reading from different sources, such as local disk or remote servers. - storage_options: dict | None = None, + storage_options : dict | None = None, These are the additional kwargs (e.g. AWS credentials) that are passed to the zarr.open convenience function. This is only used on the "zarr" stream_mode. Returns @@ -927,7 +927,7 @@ def get_num_samples(self): """Returns the number of samples in this signal block Returns: - SampleIndex: Number of samples in the signal block + SampleIndex : Number of samples in the signal block """ return self._num_samples @@ -961,24 +961,24 @@ class NwbSortingExtractor(BaseSorting): """Load an NWBFile as a SortingExtractor. Parameters ---------- - file_path: str or Path + file_path : str or Path Path to NWB file. 
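To make the streaming parameters documented above concrete, here is a hedged sketch of opening an NWB file remotely (the S3 URL and series path are hypothetical):

```python
from spikeinterface.extractors import NwbRecordingExtractor

recording = NwbRecordingExtractor(
    file_path="s3://example-bucket/session.nwb",            # hypothetical URL
    electrical_series_path="acquisition/ElectricalSeries",  # hypothetical path
    stream_mode="fsspec",  # stream the file instead of reading from local disk
    cache=True,            # keep a local copy to speed up repeated access
)
print(recording.get_num_channels())
```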
- electrical_series_path: str or None, default: None
+ electrical_series_path : str or None, default : None
 The name of the ElectricalSeries (if multiple ElectricalSeries are present).
- sampling_frequency: float or None, default: None
+ sampling_frequency : float or None, default : None
 The sampling frequency in Hz (required if no ElectricalSeries is available).
- unit_table_path: str or None, default: "units"
+ unit_table_path : str or None, default : "units"
 The path of the unit table in the NWB file.
- samples_for_rate_estimation: int, default: 100000
+ samples_for_rate_estimation : int, default : 100000
 The number of timestamp samples to use to estimate the rate.
 Used if "rate" is not specified in the ElectricalSeries.
- stream_mode : "fsspec" | "remfile" | "zarr" | None, default: None
+ stream_mode : "fsspec" | "remfile" | "zarr" | None, default : None
 The streaming mode to use. If None it assumes the file is on the local disk.
- stream_cache_path: str or Path or None, default: None
+ stream_cache_path : str or Path or None, default : None
 Local path for caching. If None it uses the system temporary directory.
- load_unit_properties: bool, default: True
+ load_unit_properties : bool, default : True
 If True, all the unit properties are loaded from the NWB file and stored as properties.
- t_start: float or None, default: None
+ t_start : float or None, default : None
 This is the time at which the corresponding ElectricalSeries starts. NWB stores its spikes as times
 and the `t_start` is used to convert the times to seconds. Concretely, the returned frames are computed as:
@@ -990,19 +990,19 @@ class NwbSortingExtractor(BaseSorting):
 When a `t_start` is not provided it will be inferred from the corresponding ElectricalSeries with name equal
 to `electrical_series_path`. The `t_start` then will be either the `ElectricalSeries.starting_time` or the
 first timestamp in the `ElectricalSeries.timestamps`.
- cache: bool, default: False
+ cache : bool, default : False
 If True, the file is cached in the file passed to stream_cache_path
 if False, the file is not cached.
- storage_options: dict | None = None,
+ storage_options : dict | None = None,
 These are the additional kwargs (e.g. AWS credentials) that are passed to the zarr.open convenience function.
 This is only used on the "zarr" stream_mode.
- use_pynwb: bool, default: False
+ use_pynwb : bool, default : False
 Uses the pynwb library to read the NWB file. Setting this to False, the default, uses h5py
 to read the file. Using h5py can improve performance by bypassing some of the PyNWB validations.

 Returns
 -------
- sorting: NwbSortingExtractor
+ sorting : NwbSortingExtractor
 The sorting extractor for the NWB file.
 """

@@ -1372,18 +1372,18 @@ def read_nwb(file_path, load_recording=True, load_sorting=False, electrical_seri

 Parameters
 ----------
- file_path: str or Path
+ file_path : str or Path
 Path to NWB file.
- load_recording : bool, default: True
+ load_recording : bool, default : True
 If True, the recording object is loaded.
- load_sorting : bool, default: False
+ load_sorting : bool, default : False
 If True, the sorting object is loaded.
- electrical_series_path: str or None, default: None
+ electrical_series_path : str or None, default : None
 The name of the ElectricalSeries (if multiple ElectricalSeries are present)

 Returns
 -------
- extractors: extractor or tuple
+ extractors : extractor or tuple
 Single RecordingExtractor/SortingExtractor or tuple with both
 (depending on "load_recording"/"load_sorting") arguments.
""" diff --git a/src/spikeinterface/extractors/phykilosortextractors.py b/src/spikeinterface/extractors/phykilosortextractors.py index e26c2eb027..3e923bacdb 100644 --- a/src/spikeinterface/extractors/phykilosortextractors.py +++ b/src/spikeinterface/extractors/phykilosortextractors.py @@ -14,15 +14,15 @@ class BasePhyKilosortSortingExtractor(BaseSorting): Parameters ---------- - folder_path: str or Path + folder_path : str or Path Path to the output Phy folder (containing the params.py) - exclude_cluster_groups: list or str, default: None + exclude_cluster_groups : list or str, default : None Cluster groups to exclude (e.g. "noise" or ["noise", "mua"]). - keep_good_only : bool, default: True + keep_good_only : bool, default : True Whether to only keep good units. - remove_empty_units : bool, default: True + remove_empty_units : bool, default : True If True, empty units are removed from the sorting extractor. - load_all_cluster_properties : bool, default: True + load_all_cluster_properties : bool, default : True If True, all cluster properties are loaded from the tsv/csv files. """ @@ -204,11 +204,11 @@ class PhySortingExtractor(BasePhyKilosortSortingExtractor): Parameters ---------- - folder_path: str or Path + folder_path : str or Path Path to the output Phy folder (containing the params.py). - exclude_cluster_groups: list or str, default: None + exclude_cluster_groups : list or str, default : None Cluster groups to exclude (e.g. "noise" or ["noise", "mua"]). - load_all_cluster_properties : bool, default: True + load_all_cluster_properties : bool, default : True If True, all cluster properties are loaded from the tsv/csv files. Returns @@ -245,12 +245,12 @@ class KiloSortSortingExtractor(BasePhyKilosortSortingExtractor): Parameters ---------- - folder_path: str or Path + folder_path : str or Path Path to the output Phy folder (containing the params.py). - keep_good_only : bool, default: True + keep_good_only : bool, default : True Whether to only keep good units. If True, only Kilosort-labeled 'good' units are returned. - remove_empty_units : bool, default: True + remove_empty_units : bool, default : True If True, empty units are removed from the sorting extractor. Returns diff --git a/src/spikeinterface/extractors/toy_example.py b/src/spikeinterface/extractors/toy_example.py index f3576372a4..6aaee141b9 100644 --- a/src/spikeinterface/extractors/toy_example.py +++ b/src/spikeinterface/extractors/toy_example.py @@ -43,32 +43,32 @@ def toy_example( Parameters ---------- - duration: float or list[float], default: 10 + duration : float or list[float], default : 10 Duration in seconds. If a list is provided, it will be the duration of each segment. - num_channels: int, default: 4 + num_channels : int, default : 4 Number of channels - num_units: int, default: 10 + num_units : int, default : 10 Number of units - sampling_frequency: float, default: 30000 + sampling_frequency : float, default : 30000 Sampling frequency - num_segments: int, default: 2 + num_segments : int, default : 2 Number of segments. - spike_times: np.array or list[nparray] or None, default: None + spike_times : np.array or list[nparray] or None, default : None Spike time in the recording - spike_labels: np.array or list[nparray] or None, default: None + spike_labels : np.array or list[nparray] or None, default : None Cluster label for each spike time (needs to specified both together). 
- # score_detection: int (between 0 and 1)
+ # score_detection : int (between 0 and 1)
 # Generate the sorting based on a subset of spikes compared with the trace generation
- firing_rate: float, default: 3.0
+ firing_rate : float, default : 3.0
 The firing rate for the units (in Hz)
- seed: int or None, default: None
+ seed : int or None, default : None
 Seed for random initialization.

 Returns
 -------
- recording: RecordingExtractor
+ recording : RecordingExtractor
 The output recording extractor.
- sorting: SortingExtractor
+ sorting : SortingExtractor
 The output sorting extractor.
 """

diff --git a/src/spikeinterface/generation/drift_tools.py b/src/spikeinterface/generation/drift_tools.py
index bb73e0fced..4ab1a24165 100644
--- a/src/spikeinterface/generation/drift_tools.py
+++ b/src/spikeinterface/generation/drift_tools.py
@@ -16,21 +16,21 @@ def interpolate_templates(templates_array, source_locations, dest_locations, int

 Parameters
 ----------
- templates_array: np.array
+ templates_array : np.array
 A numpy array with dense templates_array.
 shape = (num_templates, num_samples, num_channels)
- source_locations: np.array
+ source_locations : np.array
 The channel source location corresponding to templates_array.
 shape = (num_channels, 2)
- dest_locations: np.array
+ dest_locations : np.array
 The new channel position, if ndim == 3, then the interpolation is broadcast with the last dim.
 shape = (num_channels, 2) or (num_motions, num_channels, 2)
- interpolation_method: str, default "cubic"
+ interpolation_method : str, default : "cubic"
 The interpolation method.

 Returns
 -------
- new_templates_array: np.array
+ new_templates_array : np.array
 shape = (num_templates, num_samples, num_channels) or = (num_motions, num_templates, num_samples, num_channels)
 """
 import scipy.interpolate

@@ -72,23 +72,23 @@ def move_dense_templates(templates_array, displacements, source_probe, dest_prob

 Parameters
 ----------
- templates_array: np.array
+ templates_array : np.array
 A numpy array with dense templates_array.
 shape = (num_templates, num_samples, num_channels)
- displacements: np.array
+ displacements : np.array
 Displacement vector
- shape: (num_displacement, 2)
+ shape : (num_displacement, 2)
- source_probe: Probe
+ source_probe : Probe
 The Probe object on which templates_array are defined
- dest_probe: Probe | None, default: None
+ dest_probe : Probe | None, default : None
 The destination Probe. Can have a different geometry than the original.
 If None then the same probe is used.
- interpolation_method: "cubic" | "linear", default: "cubic"
+ interpolation_method : "cubic" | "linear", default : "cubic"
 The interpolation method.

 Returns
 -------
- new_templates_array: np.array
+ new_templates_array : np.array
 shape = (num_displacement, num_templates, num_samples, num_channels)
 """
 assert displacements.ndim == 2

@@ -139,16 +139,16 @@ def move_one_template(self, unit_index, displacement, **interpolation_kwargs):

 Parameters
 ----------
- unit_index: int
+ unit_index : int
 The unit index to move.
- displacements: np.array
+ displacements : np.array
 The displacement vector.
 shape = (1, 2)
- **interpolation_kwargs: keyword arguments for `move_dense_templates` function
+ **interpolation_kwargs : keyword arguments for `move_dense_templates` function

 Returns
 -------
- template_array_moved: np.array
+ template_array_moved : np.array
 The moved template.
shape = (num_displacements, num_samples, num_channels)
 """

@@ -171,10 +171,10 @@ def precompute_displacements(self, displacements, **interpolation_kwargs):

 Parameters
 ----------
- displacements: np.array
+ displacements : np.array
 The displacement vector.
 shape = (num_displacements, 2)
- **interpolation_kwargs: keyword arguments for `move_dense_templates` function
+ **interpolation_kwargs : keyword arguments for `move_dense_templates` function
 """
 dense_static_templates = self.get_dense_templates()

@@ -190,16 +190,16 @@ def make_linear_displacement(start, stop, num_step=10):

 Parameters
 ----------
- start: np.array of 2 elements
+ start : np.array of 2 elements
 The start position.
- stop: np.array of 2 elements
+ stop : np.array of 2 elements
 The stop position.
- num_step: int, default: 10
+ num_step : int, default : 10
 The number of steps between start and stop.

 Returns
 -------
- displacements: np.array
+ displacements : np.array
 The displacements with shape (num_step, 2)
 """
 displacements = (stop[np.newaxis, :] - start[np.newaxis, :]) / (num_step - 1) * np.arange(num_step)[

@@ -215,28 +215,28 @@ class InjectDriftingTemplatesRecording(BaseRecording):

 Parameters
 ----------
- sorting: BaseSorting
+ sorting : BaseSorting
 Sorting object containing all the units and their spike train
- drifting_templates: DriftingTemplates
+ drifting_templates : DriftingTemplates
 The drifting template object
- displacement_vectors: list of numpy array
+ displacement_vectors : list of numpy array
 The length of the list is the number of segments.
 Per segment, the drift vector is a numpy array with shape (num_times, 2, num_motions)
 num_motions is generally = 1 but can be > 1 in case of combining several drift vectors
- displacement_sampling_frequency: float
+ displacement_sampling_frequency : float
 The sampling frequency of drift vector
- displacement_unit_factor: numpy array or None, default: None
+ displacement_unit_factor : numpy array or None, default : None
 An array containing the factor per unit of the drift.
 This is used to create non-rigid drift with a gradient of factors depending on the unit positions.
 shape (num_units, num_motions)
 If None then all units have the same factor (1) and the drift is rigid.
- parent_recording: BaseRecording or None, default: None
+ parent_recording : BaseRecording or None, default : None
 The recording over which to add the templates.
 If None, will default to traces containing all 0.
- num_samples: list[int] or int or None, default: None
+ num_samples : list[int] or int or None, default : None
 The number of samples in the recording per segment.
 You can use int for mono-segment objects.
- amplitude_factor: list of numpy array or numpy array or float or None, default: None
+ amplitude_factor : list of numpy array or numpy array or float or None, default : None
 Controls the amplitude scaling for each spike for each unit.
 If None, no amplitude scaling is applied.
 If scalar all spikes have the same factor (certainly useless).

 Returns
 -------
- injected_recording: InjectDriftingTemplatesRecording
+ injected_recording : InjectDriftingTemplatesRecording
 The recording with the templates injected.
 """
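A minimal sketch of `make_linear_displacement` as documented above; the positions are arbitrary example values in micrometers, and the import path is assumed:

```python
import numpy as np
from spikeinterface.generation import make_linear_displacement  # import path assumed

start = np.array([0.0, 0.0])   # (x, y) start position in um
stop = np.array([0.0, 20.0])   # 20 um drift along the probe axis
displacements = make_linear_displacement(start, stop, num_step=5)
print(displacements.shape)  # (5, 2): linearly spaced steps between start and stop
```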
""" diff --git a/src/spikeinterface/postprocessing/alignsorting.py b/src/spikeinterface/postprocessing/alignsorting.py index 4f031eb635..c8b6bd337c 100644 --- a/src/spikeinterface/postprocessing/alignsorting.py +++ b/src/spikeinterface/postprocessing/alignsorting.py @@ -15,16 +15,16 @@ class AlignSortingExtractor(BaseSorting): Parameters ---------- - sorting: BaseSorting + sorting : BaseSorting The sorting to align. - unit_peak_shifts: dict + unit_peak_shifts : dict Dictionary mapping the unit_id to the unit's shift (in number of samples). A positive shift means the spike train is shifted back in time, while a negative shift means the spike train is shifted forward. Returns ------- - aligned_sorting: AlignSortingExtractor + aligned_sorting : AlignSortingExtractor The aligned sorting. """ diff --git a/src/spikeinterface/preprocessing/clip.py b/src/spikeinterface/preprocessing/clip.py index 100fa77703..40cd29e53d 100644 --- a/src/spikeinterface/preprocessing/clip.py +++ b/src/spikeinterface/preprocessing/clip.py @@ -15,18 +15,18 @@ class ClipRecording(BasePreprocessor): Parameters ---------- - recording: RecordingExtractor + recording : RecordingExtractor The recording extractor to be transformed - a_min: float or None, default: None + a_min : float or None, default : None Minimum value. If `None`, clipping is not performed on lower interval edge. - a_max: float or None, default: None + a_max : float or None, default : None Maximum value. If `None`, clipping is not performed on upper interval edge. Returns ------- - rescaled_traces: ClipTracesRecording + rescaled_traces : ClipTracesRecording The clipped traces recording extractor object """ @@ -57,31 +57,31 @@ class BlankSaturationRecording(BasePreprocessor): Parameters ---------- - recording: RecordingExtractor + recording : RecordingExtractor The recording extractor to be transformed Minimum value. If `None`, clipping is not performed on lower interval edge. - abs_threshold: float or None, default: None + abs_threshold : float or None, default : None The absolute value for considering that the signal is saturating - quantile_threshold: float or None, default: None + quantile_threshold : float or None, default : None Tha value in [0, 1] used if abs_threshold is None to automatically set the abs_threshold given the data. Must be provided if abs_threshold is None - direction: "upper" | "lower" | "both", default: "upper" + direction : "upper" | "lower" | "both", default : "upper" Only values higher than the detection threshold are set to fill_value ("higher"), or only values lower than the detection threshold ("lower"), or both ("both") - fill_value: float or None, default: None + fill_value : float or None, default : None The value to write instead of the saturating signal. 
If None, then the value is automatically computed as the median signal value - num_chunks_per_segment: int, default: 50 + num_chunks_per_segment : int, default : 50 The number of chunks per segments to consider to estimate the threshold/fill_values - chunk_size: int, default: 500 + chunk_size : int, default : 500 The chunk size to estimate the threshold/fill_values - seed: int, default: 0 + seed : int, default : 0 The seed to select the random chunks Returns ------- - rescaled_traces: BlankSaturationRecording + rescaled_traces : BlankSaturationRecording The filtered traces recording extractor object """ diff --git a/src/spikeinterface/preprocessing/common_reference.py b/src/spikeinterface/preprocessing/common_reference.py index a4369309f2..81bd737ada 100644 --- a/src/spikeinterface/preprocessing/common_reference.py +++ b/src/spikeinterface/preprocessing/common_reference.py @@ -36,26 +36,26 @@ class CommonReferenceRecording(BasePreprocessor): Parameters ---------- - recording: RecordingExtractor + recording : RecordingExtractor The recording extractor to be re-referenced - reference: "global" | "single" | "local", default: "global" + reference : "global" | "single" | "local", default : "global" If "global" the reference is the average or median across all the channels. If "single", the reference is a single channel or a list of channels that need to be set with the `ref_channel_ids`. If "local", the reference is the set of channels within an annulus that must be set with the `local_radius` parameter. - operator: "median" | "average", default: "median" + operator : "median" | "average", default : "median" If "median", a common median reference (CMR) is implemented (the median of the selected channels is removed for each timestamp). If "average", common average reference (CAR) is implemented (the mean of the selected channels is removed for each timestamp). - groups: list or None, default: None + groups : list or None, default : None List of lists containing the channel ids for splitting the reference. The CMR, CAR, or referencing with respect to single channels are applied group-wise. However, this is not applied for the local CAR. It is useful when dealing with different channel groups, e.g. multiple tetrodes. - ref_channel_ids: list or str or int, default: None + ref_channel_ids : list or str or int, default : None If no "groups" are specified, all channels are referenced to "ref_channel_ids". If "groups" is provided, then a list of channels to be applied to each group is expected. If "single" reference, a list of one channel or an int is expected. - local_radius: tuple(int, int), default: (30, 55) + local_radius : tuple(int, int), default : (30, 55) Use in the local CAR implementation as the selecting annulus with the following format: `(exclude radius, include radius)` @@ -65,12 +65,12 @@ class CommonReferenceRecording(BasePreprocessor): include radius delineates the outer boundary of the annulus whose role is to exclude channels that are too far away. - dtype: None or dtype, default: None + dtype : None or dtype, default : None If None the parent dtype is kept. 
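As a usage sketch for the referencing options documented above, assuming an existing `recording` object and the `common_reference` convenience wrapper:

```python
import spikeinterface.preprocessing as spre

# Common median reference (CMR) across all channels
rec_cmr = spre.common_reference(recording, reference="global", operator="median")

# Local CAR: average within an annulus of 30-55 um around each channel
rec_local = spre.common_reference(recording, reference="local", local_radius=(30, 55))
```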
Returns ------- - referenced_recording: CommonReferenceRecording + referenced_recording : CommonReferenceRecording The re-referenced recording extractor object """ @@ -230,7 +230,7 @@ def slice_groups(self, channel_indices): Parameters ---------- - channel_indices: array-like + channel_indices : array-like The channel indices to be sliced Returns diff --git a/src/spikeinterface/preprocessing/correct_lsb.py b/src/spikeinterface/preprocessing/correct_lsb.py index 868e2aff3c..01c30cd5b0 100644 --- a/src/spikeinterface/preprocessing/correct_lsb.py +++ b/src/spikeinterface/preprocessing/correct_lsb.py @@ -16,18 +16,18 @@ def correct_lsb(recording, num_chunks_per_segment=20, chunk_size=10000, seed=Non ---------- recording : RecordingExtractor The recording extractor to be LSB-corrected. - num_chunks_per_segment: int, default: 20 + num_chunks_per_segment : int, default : 20 Number of chunks per segment for random chunk - chunk_size : int, default: 10000 + chunk_size : int, default : 10000 Size of a chunk in number for random chunk - seed : int or None, default: None + seed : int or None, default : None Random seed for random chunk - verbose : bool, default: False + verbose : bool, default : False If True, estimate LSB value is printed Returns ------- - correct_lsb_recording: ScaleRecording + correct_lsb_recording : ScaleRecording The recording extractor with corrected LSB """ random_data = get_random_data_chunks( diff --git a/src/spikeinterface/preprocessing/depth_order.py b/src/spikeinterface/preprocessing/depth_order.py index bd483ab403..2fda7bbca0 100644 --- a/src/spikeinterface/preprocessing/depth_order.py +++ b/src/spikeinterface/preprocessing/depth_order.py @@ -16,11 +16,11 @@ class DepthOrderRecording(ChannelSliceRecording): The recording to re-order. channel_ids : list/array or None If given, a subset of channels to order locations for - dimensions : str or tuple, list, default: ("x", "y") + dimensions : str or tuple, list, default : ("x", "y") If str, it needs to be "x", "y", "z". If tuple or list, it sorts the locations in two dimensions using lexsort. This approach is recommended since there is less ambiguity - flip: bool, default: False + flip : bool, default : False If flip is False then the order is bottom first (starting from tip of the probe). If flip is True then the order is upper first. """ diff --git a/src/spikeinterface/preprocessing/detect_bad_channels.py b/src/spikeinterface/preprocessing/detect_bad_channels.py index e2e4f46c54..aa5bae946d 100644 --- a/src/spikeinterface/preprocessing/detect_bad_channels.py +++ b/src/spikeinterface/preprocessing/detect_bad_channels.py @@ -53,47 +53,47 @@ def detect_bad_channels( ---------- recording : BaseRecording The recording for which bad channels are detected - method : "coeherence+psd" | "std" | "mad" | "neighborhood_r2", default: "coeherence+psd" + method : "coeherence+psd" | "std" | "mad" | "neighborhood_r2", default : "coeherence+psd" The method to be used for bad channel detection - std_mad_threshold : float, default: 5 + std_mad_threshold : float, default : 5 The standard deviation/mad multiplier threshold - psd_hf_threshold (coeherence+psd) : float, default: 0.02 + psd_hf_threshold (coeherence+psd) : float, default : 0.02 An absolute threshold (uV^2/Hz) used as a cutoff for noise channels. 
Channels with average power at >80% Nyquist larger than this threshold will be labeled as noise - dead_channel_threshold (coeherence+psd) : float, default: -0.5 + dead_channel_threshold (coeherence+psd) : float, default : -0.5 Threshold for channel coherence below which channels are labeled as dead - noisy_channel_threshold (coeherence+psd) : float, default: 1 + noisy_channel_threshold (coeherence+psd) : float, default : 1 Threshold for channel coherence above which channels are labeled as noisy (together with psd condition) - outside_channel_threshold (coeherence+psd) : float, default: -0.75 + outside_channel_threshold (coeherence+psd) : float, default : -0.75 Threshold for channel coherence above which channels at the edge of the recording are marked as outside of the brain - outside_channels_location (coeherence+psd) : "top" | "bottom" | "both", default: "top" + outside_channels_location (coeherence+psd) : "top" | "bottom" | "both", default : "top" Location of the outside channels. If "top", only the channels at the top of the probe can be marked as outside channels. If "bottom", only the channels at the bottom of the probe can be marked as outside channels. If "both", both the channels at the top and bottom of the probe can be marked as outside channels - n_neighbors (coeherence+psd) : int, default: 11 + n_neighbors (coeherence+psd) : int, default : 11 Number of channel neighbors to compute median filter (needs to be odd) - nyquist_threshold (coeherence+psd) : float, default: 0.8 + nyquist_threshold (coeherence+psd) : float, default : 0.8 Frequency with respect to Nyquist (Fn=1) above which the mean of the PSD is calculated and compared with psd_hf_threshold - direction (coeherence+psd): "x" | "y" | "z", default: "y" + direction (coeherence+psd) : "x" | "y" | "z", default : "y" The depth dimension - highpass_filter_cutoff : float, default: 300 + highpass_filter_cutoff : float, default : 300 If the recording is not filtered, the cutoff frequency of the highpass filter - chunk_duration_s : float, default: 0.5 + chunk_duration_s : float, default : 0.5 Duration of each chunk - num_random_chunks : int, default: 100 + num_random_chunks : int, default : 100 Number of random chunks Having many chunks is important for reproducibility. - welch_window_ms : float, default: 10 + welch_window_ms : float, default : 10 Window size for the scipy.signal.welch that will be converted to nperseg - neighborhood_r2_threshold : float, default: 0.95 + neighborhood_r2_threshold : float, default : 0.95 R^2 threshold for the neighborhood_r2 method. - neighborhood_r2_radius_um : float, default: 30 + neighborhood_r2_radius_um : float, default : 30 Spatial radius below which two channels are considered neighbors in the neighborhood_r2 method. - seed : int or None, default: None + seed : int or None, default : None The random seed to extract chunks Returns @@ -298,19 +298,19 @@ def detect_bad_channels_ibl( psd_hf_threshold : float Threshold for high frequency PSD. If mean PSD above `nyquist_threshold` * fn is greater than this value, channels are flagged as noisy (together with channel coherence condition). 
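A hedged sketch of the detection workflow described above, assuming `recording` is an existing recording object (note that "coeherence+psd" is the method name exactly as documented above):

```python
import spikeinterface.preprocessing as spre

bad_channel_ids, channel_labels = spre.detect_bad_channels(recording, method="coeherence+psd")
# channel_labels holds one label per channel, e.g. "good", "dead", "noise", "out"

# One option is to simply drop the flagged channels
rec_clean = recording.remove_channels(bad_channel_ids)
```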
- dead_channel_thr : float, default: -0.5
+ dead_channel_thr : float, default : -0.5
 Threshold for channel coherence below which channels are labeled as dead
- noisy_channel_thr : float, default: 1
+ noisy_channel_thr : float, default : 1
 Threshold for channel coherence above which channels are labeled as noisy (together with psd condition)
- outside_channel_thr : float, default: -0.75
+ outside_channel_thr : float, default : -0.75
 Threshold for channel coherence above which channels at the edge of the recording are marked as outside of the brain
- n_neighbors : int, default: 11
+ n_neighbors : int, default : 11
 Number of neighbors to compute median filter
- nyquist_threshold : float, default: 0.8
+ nyquist_threshold : float, default : 0.8
 Threshold on Nyquist frequency to calculate HF noise band
- welch_window_ms: float, default: 0.3
+ welch_window_ms : float, default : 0.3
 Window size for the scipy.signal.welch that will be converted to nperseg
- outside_channels_location : "top" | "bottom" | "both", default: "top"
+ outside_channels_location : "top" | "bottom" | "both", default : "top"
 Location of the outside channels. If "top", only the channels at the top of the probe can be
 marked as outside channels. If "bottom", only the channels at the bottom of the probe can be
 marked as outside channels. If "both", both the channels at the top and bottom of the probe can be
@@ -378,9 +378,9 @@ def detrend(x, nmed):
 """
 Subtract the trend from a vector
 The trend is a median filtered version of the said vector with tapering
- :param x: input vector
- :param nmed: number of points of the median filter
- :return: np.array
+ :param x : input vector
+ :param nmed : number of points of the median filter
+ :return : np.array
 """
 ntap = int(np.ceil(nmed / 2))
 xf = np.r_[np.zeros(ntap) + x[0], x, np.zeros(ntap) + x[-1]]
diff --git a/src/spikeinterface/preprocessing/filter.py b/src/spikeinterface/preprocessing/filter.py
index db1dcfd092..24b7fb0d3f 100644
--- a/src/spikeinterface/preprocessing/filter.py
+++ b/src/spikeinterface/preprocessing/filter.py
@@ -8,13 +8,13 @@ from ..core import get_chunk_with_margin

-_common_filter_docs = """**filter_kwargs: keyword arguments for parallel processing:
+_common_filter_docs = """**filter_kwargs : keyword arguments for parallel processing:

- * filter_order: order
+ * filter_order : order
 The order of the filter
- * filter_mode: "sos or "ba"
+ * filter_mode : "sos" or "ba"
 "sos" is biquadratic and more stable than "ba", so it is preferred.
- * ftype: str
+ * ftype : str
 Filter type for iirdesign ("butter" / "cheby1" / ... all possible of scipy.signal.iirdesign)
 """

@@ -30,28 +30,28 @@ class FilterRecording(BasePreprocessor):

 Parameters
 ----------
- recording: Recording
+ recording : Recording
 The recording extractor to be re-referenced
- band: float or list, default: [300.0, 6000.0]
+ band : float or list, default : [300.0, 6000.0]
 If float, cutoff frequency in Hz for "highpass" filter type
 If a list,
band (low, high) in Hz for "bandpass" filter type - btype: "bandpass" | "highpass", default: "bandpass" + btype : "bandpass" | "highpass", default : "bandpass" Type of the filter - margin_ms: float, default: 5.0 + margin_ms : float, default : 5.0 Margin in ms on border to avoid border effect - filter_mode: "sos" | "ba", default: "sos" + filter_mode : "sos" | "ba", default : "sos" Filter form of the filter coefficients: - second-order sections ("sos") - - numerator/denominator: ("ba") - coef: array or None, default: None + - numerator/denominator : ("ba") + coef : array or None, default : None Filter coefficients in the filter_mode form. - dtype: dtype or None, default: None + dtype : dtype or None, default : None The dtype of the returned traces. If None, the dtype of the parent recording is used {} Returns ------- - filter_recording: FilterRecording + filter_recording : FilterRecording The filtered recording extractor object """ @@ -168,20 +168,20 @@ class BandpassFilterRecording(FilterRecording): Parameters ---------- - recording: Recording + recording : Recording The recording extractor to be re-referenced - freq_min: float + freq_min : float The highpass cutoff frequency in Hz - freq_max: float + freq_max : float The lowpass cutoff frequency in Hz - margin_ms: float + margin_ms : float Margin in ms on border to avoid border effect - dtype: dtype or None + dtype : dtype or None The dtype of the returned traces. If None, the dtype of the parent recording is used {} Returns ------- - filter_recording: BandpassFilterRecording + filter_recording : BandpassFilterRecording The bandpass-filtered recording extractor object """ @@ -204,18 +204,18 @@ class HighpassFilterRecording(FilterRecording): Parameters ---------- - recording: Recording + recording : Recording The recording extractor to be re-referenced - freq_min: float + freq_min : float The highpass cutoff frequency in Hz - margin_ms: float + margin_ms : float Margin in ms on border to avoid border effect - dtype: dtype or None + dtype : dtype or None The dtype of the returned traces. If None, the dtype of the parent recording is used {} Returns ------- - filter_recording: HighpassFilterRecording + filter_recording : HighpassFilterRecording The highpass-filtered recording extractor object """ @@ -234,16 +234,16 @@ class NotchFilterRecording(BasePreprocessor): """ Parameters ---------- - recording: RecordingExtractor + recording : RecordingExtractor The recording extractor to be notch-filtered - freq: int or float + freq : int or float The target frequency in Hz of the notch filter - q: int + q : int The quality factor of the notch filter {} Returns ------- - filter_recording: NotchFilterRecording + filter_recording : NotchFilterRecording The notch-filtered recording extractor object """ diff --git a/src/spikeinterface/preprocessing/filter_gaussian.py b/src/spikeinterface/preprocessing/filter_gaussian.py index e111007a06..cc8efa7d89 100644 --- a/src/spikeinterface/preprocessing/filter_gaussian.py +++ b/src/spikeinterface/preprocessing/filter_gaussian.py @@ -23,20 +23,20 @@ class GaussianFilterRecording(BasePreprocessor): Parameters ---------- - recording: BaseRecording + recording : BaseRecording The recording extractor to be filtered. - freq_min: float or None + freq_min : float or None The lower frequency cutoff for the bandpass filter. If None, the resulting object is a lowpass filter. - freq_max: float or None + freq_max : float or None The higher frequency cutoff for the bandpass filter. 
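For the filter classes documented above, spikeinterface also exposes snake_case wrappers; a short sketch assuming an existing `recording`:

```python
import spikeinterface.preprocessing as spre

# Band-pass for spike-band analysis
rec_band = spre.bandpass_filter(recording, freq_min=300.0, freq_max=6000.0)

# High-pass only, keeping everything above 300 Hz
rec_high = spre.highpass_filter(recording, freq_min=300.0)
```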
If None, the resulting object is a highpass filter. - margin_sd: float, default: 5.0 + margin_sd : float, default : 5.0 The number of standard deviation to take for margins. Returns ------- - gaussian_filtered_recording: GaussianFilterRecording + gaussian_filtered_recording : GaussianFilterRecording The filtered recording extractor object. """ diff --git a/src/spikeinterface/preprocessing/interpolate_bad_channels.py b/src/spikeinterface/preprocessing/interpolate_bad_channels.py index 1d69af176a..1a1edfb917 100644 --- a/src/spikeinterface/preprocessing/interpolate_bad_channels.py +++ b/src/spikeinterface/preprocessing/interpolate_bad_channels.py @@ -21,23 +21,23 @@ class InterpolateBadChannelsRecording(BasePreprocessor): Parameters ---------- - recording: BaseRecording + recording : BaseRecording The parent recording bad_channel_ids : list or 1d np.array Channel ids of the bad channels to interpolate. - sigma_um : float or None, default: None + sigma_um : float or None, default : None Distance between sequential channels in um. If None, will use the most common distance between y-axis channels - p : float, default: 1.3 + p : float, default : 1.3 Exponent of the Gaussian kernel. Determines rate of decay for distance weightings - weights : np.array or None, default: None + weights : np.array or None, default : None The weights to give to bad_channel_ids at interpolation. If None, weights are automatically computed Returns ------- - interpolated_recording: InterpolateBadChannelsRecording + interpolated_recording : InterpolateBadChannelsRecording The recording object with interpolated bad channels """ diff --git a/src/spikeinterface/preprocessing/motion.py b/src/spikeinterface/preprocessing/motion.py index 14a0d36d72..908c350847 100644 --- a/src/spikeinterface/preprocessing/motion.py +++ b/src/spikeinterface/preprocessing/motion.py @@ -244,39 +244,39 @@ def correct_motion( * :py:func:`~spikeinterface.sortingcomponents.motion_interpolation.interpolate_motion` - Possible presets: {} + Possible presets : {} Parameters ---------- - recording: RecordingExtractor + recording : RecordingExtractor The recording extractor to be transformed - preset: str, default: "nonrigid_accurate" + preset : str, default : "nonrigid_accurate" The preset name - folder: Path str or None, default: None + folder : Path str or None, default : None If not None then intermediate motion info are saved into a folder - output_motion_info: bool, default: False + output_motion_info : bool, default : False If True, then the function returns a `motion_info` dictionary that contains variables to check intermediate steps (motion_histogram, non_rigid_windows, pairwise_displacement) This dictionary is the same when reloaded from the folder - detect_kwargs: dict + detect_kwargs : dict Optional parameters to overwrite the ones in the preset for "detect" step. - select_kwargs: dict + select_kwargs : dict If not None, optional parameters to overwrite the ones in the preset for "select" step. If None, the "select" step is skipped. - localize_peaks_kwargs: dict + localize_peaks_kwargs : dict Optional parameters to overwrite the ones in the preset for "localize" step. - estimate_motion_kwargs: dict + estimate_motion_kwargs : dict Optional parameters to overwrite the ones in the preset for "estimate_motion" step. - interpolate_motion_kwargs: dict + interpolate_motion_kwargs : dict Optional parameters to overwrite the ones in the preset for "detect" step. 
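A minimal sketch of the motion-correction entry point described above, assuming an existing `recording` (the folder name is hypothetical):

```python
from spikeinterface.preprocessing import correct_motion

# One-liner with a preset
rec_corrected = correct_motion(recording, preset="nonrigid_accurate")

# Keep the intermediate motion info for inspection
rec_corrected, motion_info = correct_motion(
    recording,
    preset="nonrigid_accurate",
    folder="motion_info_folder",   # hypothetical folder for intermediate results
    output_motion_info=True,
)
```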
{}

 Returns
 -------
- recording_corrected: Recording
+ recording_corrected : Recording
 The motion corrected recording
- motion_info: dict
+ motion_info : dict
 Optional output if `output_motion_info=True`
 """
diff --git a/src/spikeinterface/preprocessing/normalize_scale.py b/src/spikeinterface/preprocessing/normalize_scale.py
index 08612dffd4..bc2f4224d4 100644
--- a/src/spikeinterface/preprocessing/normalize_scale.py
+++ b/src/spikeinterface/preprocessing/normalize_scale.py
@@ -46,25 +46,25 @@ class NormalizeByQuantileRecording(BasePreprocessor):

 Parameters
 ----------
- recording: RecordingExtractor
+ recording : RecordingExtractor
 The recording extractor to be transformed
- scale: float, default: 1.0
+ scale : float, default : 1.0
 Scale for the output distribution
- median: float, default: 0.0
+ median : float, default : 0.0
 Median for the output distribution
- q1: float, default: 0.01
+ q1 : float, default : 0.01
 Lower quantile used for measuring the scale
- q1: float, default: 0.99
+ q2 : float, default : 0.99
 Upper quantile used for measuring the scale
- mode: "by_channel" | "pool_channel", default: "by_channel"
+ mode : "by_channel" | "pool_channel", default : "by_channel"
 If "by_channel" each channel is rescaled independently.
- dtype: str or np.dtype, default: "float32"
+ dtype : str or np.dtype, default : "float32"
 The dtype of the output traces
- **random_chunk_kwargs: Keyword arguments for `spikeinterface.core.get_random_data_chunk()` function
+ **random_chunk_kwargs : Keyword arguments for `spikeinterface.core.get_random_data_chunk()` function

 Returns
 -------
- rescaled_traces: NormalizeByQuantileRecording
+ rescaled_traces : NormalizeByQuantileRecording
 The rescaled traces recording extractor object
 """

@@ -130,18 +130,18 @@ class ScaleRecording(BasePreprocessor):

 Parameters
 ----------
- recording: RecordingExtractor
+ recording : RecordingExtractor
 The recording extractor to be transformed
- gain: float or array
+ gain : float or array
 Scalar for the traces of the recording extractor or array with scalars for each channel
- offset: float or array
+ offset : float or array
 Offset for the traces of the recording extractor or array with offsets for each channel
- dtype: str or np.dtype, default: "float32"
+ dtype : str or np.dtype, default : "float32"
 The dtype of the output traces

 Returns
 -------
- transform_traces: ScaleRecording
+ transform_traces : ScaleRecording
 The transformed traces recording extractor object
 """

@@ -190,17 +190,17 @@ class CenterRecording(BasePreprocessor):

 Parameters
 ----------
- recording: RecordingExtractor
+ recording : RecordingExtractor
 The recording extractor to be centered
- mode: "median" | "mean", default: "median"
+ mode : "median" | "mean", default : "median"
 The method used to center the traces
- dtype: str or np.dtype, default: "float32"
+ dtype : str or np.dtype, default : "float32"
 The dtype of the output traces
- **random_chunk_kwargs: Keyword arguments for `spikeinterface.core.get_random_data_chunk()` function
+ **random_chunk_kwargs : Keyword arguments for `spikeinterface.core.get_random_data_chunk()` function

 Returns
 -------
- centered_traces: ScaleRecording
+ centered_traces : ScaleRecording
 The centered traces recording extractor object
 """

@@ -238,11 +238,11 @@ class ZScoreRecording(BasePreprocessor):

 Parameters
 ----------
- recording: RecordingExtractor
+ recording : RecordingExtractor
 The recording extractor to be centered
- mode: "median+mad" | "mean+std", default: "median+mad"
+ mode : "median+mad" | "mean+std", default : "median+mad"
The mode to compute the zscore - dtype: None or dtype + dtype : None or dtype If None the the parent dtype is kept. For integer dtype a int_scale must be also given. gain : None or np.array @@ -253,11 +253,11 @@ class ZScoreRecording(BasePreprocessor): Apply a scaling factor to fit the integer range. This is used when the dtype is an integer, so that the output is scaled. For example, a value of `int_scale=200` will scale the zscore value to a standard deviation of 200. - **random_chunk_kwargs: Keyword arguments for `spikeinterface.core.get_random_data_chunk()` function + **random_chunk_kwargs : Keyword arguments for `spikeinterface.core.get_random_data_chunk()` function Returns ------- - centered_traces: ScaleRecording + centered_traces : ScaleRecording The centered traces recording extractor object """ diff --git a/src/spikeinterface/preprocessing/phase_shift.py b/src/spikeinterface/preprocessing/phase_shift.py index 23f4320053..9708a04749 100644 --- a/src/spikeinterface/preprocessing/phase_shift.py +++ b/src/spikeinterface/preprocessing/phase_shift.py @@ -23,18 +23,18 @@ class PhaseShiftRecording(BasePreprocessor): Parameters ---------- - recording: Recording + recording : Recording The recording. It need to have "inter_sample_shift" in properties. - margin_ms: float, default: 40.0 + margin_ms : float, default : 40.0 Margin in ms for computation. 40ms ensure a very small error when doing chunk processing - inter_sample_shift: None or numpy array, default: None + inter_sample_shift : None or numpy array, default : None If "inter_sample_shift" is not in recording properties, we can externally provide one. Returns ------- - filter_recording: PhaseShiftRecording + filter_recording : PhaseShiftRecording The phase shifted recording object """ diff --git a/src/spikeinterface/preprocessing/remove_artifacts.py b/src/spikeinterface/preprocessing/remove_artifacts.py index f13e50661f..892bfc090e 100644 --- a/src/spikeinterface/preprocessing/remove_artifacts.py +++ b/src/spikeinterface/preprocessing/remove_artifacts.py @@ -22,21 +22,21 @@ class RemoveArtifactsRecording(BasePreprocessor): Parameters ---------- - recording: RecordingExtractor + recording : RecordingExtractor The recording extractor to remove artifacts from - list_triggers: list of lists/arrays + list_triggers : list of lists/arrays One list per segment of int with the stimulation trigger frames - ms_before: float or None, default: 0.5 + ms_before : float or None, default : 0.5 Time interval in ms to remove before the trigger events. If None, then also ms_after must be None and a single sample is removed - ms_after: float or None, default: 3.0 + ms_after : float or None, default : 3.0 Time interval in ms to remove after the trigger events. If None, then also ms_before must be None and a single sample is removed - list_labels: list of lists/arrays or None + list_labels : list of lists/arrays or None One list per segment of labels with the stimulation labels for the given artifacts. labels should be strings, for JSON serialization. Required for "median" and "average" modes. - mode: "zeros", "linear", "cubic", "average", "median", default: "zeros" + mode : "zeros", "linear", "cubic", "average", "median", default : "zeros" Determines what artifacts are replaced by. Can be one of the following: - "zeros": Artifacts are replaced by zeros. @@ -63,31 +63,31 @@ class RemoveArtifactsRecording(BasePreprocessor): continuation of the trace. 
If the trace starts or ends with an artifact, the gap is filled with the closest available value before or after the artifact.
- fit_sample_spacing: float, default: 1.0
+ fit_sample_spacing : float, default : 1.0
 Determines the spacing (in ms) of reference points for the cubic spline
 fit if mode = "cubic". Note: the actual fit samples are
 the median of the 5 data points around the time of each sample point to avoid excessive influence from hyper-local fluctuations.
- artifacts: dict or None, default: None
+ artifacts : dict or None, default : None
 If provided (when mode is "median" or "average") then it must be a dict with
 keys that are the labels of the artifacts, and values the artifacts themselves, on all channels (and thus bypassing ms_before and ms_after)
- sparsity: dict or None, default: None
+ sparsity : dict or None, default : None
 If provided (when mode is "median" or "average") then it must be a dict with keys that are the labels of the artifacts, and values that are boolean masks of the channels where the artifacts should be considered (for subtraction/scaling)
- scale_amplitude: False, default: False
+ scale_amplitude : bool, default : False
 If True, then for mode "median" or "average" the amplitude of the template will be scaled in amplitude at each time occurrence to minimize residuals
- time_jitter: float, default: 0
+ time_jitter : float, default : 0
 If non-zero, then for mode "median" or "average", a time jitter in ms
 can be allowed to minimize the residuals
- waveforms_kwargs: None
+ waveforms_kwargs : None
 Deprecated and ignored

 Returns
 -------
- removed_recording: RemoveArtifactsRecording
+ removed_recording : RemoveArtifactsRecording
 The recording extractor after artifact removal
 """
diff --git a/src/spikeinterface/preprocessing/resample.py b/src/spikeinterface/preprocessing/resample.py
index 9ec9c5779b..83a2c3577a 100644
--- a/src/spikeinterface/preprocessing/resample.py
+++ b/src/spikeinterface/preprocessing/resample.py
@@ -28,16 +28,16 @@ class ResampleRecording(BasePreprocessor):
 The recording extractor to be re-referenced
 resample_rate : int
 The resampling frequency
- margin : float, default: 100.0
+ margin : float, default : 100.0
 Margin in ms for computations, will be used to decrease edge effects.
- dtype : dtype or None, default: None
+ dtype : dtype or None, default : None
 The dtype of the returned traces. If None, the dtype of the parent recording is used.
- skip_checks : bool, default: False
+ skip_checks : bool, default : False
 If True, checks on sampling frequencies and cutoff filter frequencies are skipped

 Returns
 -------
- resample_recording: ResampleRecording
+ resample_recording : ResampleRecording
 The resampled recording extractor object.
 """
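A brief sketch of the resampler documented above, assuming an existing `recording` and the `resample` convenience wrapper:

```python
import spikeinterface.preprocessing as spre

# Downsample, e.g. to inspect low-frequency content
rec_1k = spre.resample(recording, resample_rate=1000)
print(rec_1k.get_sampling_frequency())  # 1000.0
```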
""" diff --git a/src/spikeinterface/preprocessing/silence_periods.py b/src/spikeinterface/preprocessing/silence_periods.py index e35e51eac7..ed0a0c9d28 100644 --- a/src/spikeinterface/preprocessing/silence_periods.py +++ b/src/spikeinterface/preprocessing/silence_periods.py @@ -19,14 +19,14 @@ class SilencedPeriodsRecording(BasePreprocessor): Parameters ---------- - recording: RecordingExtractor + recording : RecordingExtractor The recording extractor to silance periods - list_periods: list of lists/arrays + list_periods : list of lists/arrays One list per segment of tuples (start_frame, end_frame) to silence - noise_levels: array + noise_levels : array Noise levels if already computed - mode: "zeros" | "noise, default: "zeros" + mode : "zeros" | "noise, default : "zeros" Determines what periods are replaced by. Can be one of the following: - "zeros": Artifacts are replaced by zeros. @@ -34,11 +34,11 @@ class SilencedPeriodsRecording(BasePreprocessor): - "noise": The periods are filled with a gaussion noise that has the same variance that the one in the recordings, on a per channel basis - **random_chunk_kwargs: Keyword arguments for `spikeinterface.core.get_random_data_chunk()` function + **random_chunk_kwargs : Keyword arguments for `spikeinterface.core.get_random_data_chunk()` function Returns ------- - silence_recording: SilencedPeriodsRecording + silence_recording : SilencedPeriodsRecording The recording extractor after silencing some periods """ diff --git a/src/spikeinterface/preprocessing/unsigned_to_signed.py b/src/spikeinterface/preprocessing/unsigned_to_signed.py index bf7e48838c..32dada11de 100644 --- a/src/spikeinterface/preprocessing/unsigned_to_signed.py +++ b/src/spikeinterface/preprocessing/unsigned_to_signed.py @@ -12,9 +12,9 @@ class UnsignedToSignedRecording(BasePreprocessor): Parameters ---------- - recording: Recording + recording : Recording The recording to be signed. - bit_depth: int or None, default: None + bit_depth : int or None, default : None In case the bit depth of the ADC does not match that of the data type, it specifies the bit depth of the ADC to estimate the offset. For example, a `bit_depth` of 12 will correct for an offset of `2**11` diff --git a/src/spikeinterface/preprocessing/whiten.py b/src/spikeinterface/preprocessing/whiten.py index 1e29a2faac..24da3c7304 100644 --- a/src/spikeinterface/preprocessing/whiten.py +++ b/src/spikeinterface/preprocessing/whiten.py @@ -15,36 +15,36 @@ class WhitenRecording(BasePreprocessor): Parameters ---------- - recording: RecordingExtractor + recording : RecordingExtractor The recording extractor to be whitened. - dtype: None or dtype, default: None + dtype : None or dtype, default : None If None the the parent dtype is kept. For integer dtype a int_scale must be also given. - mode: "global" | "local", default: "global" + mode : "global" | "local", default : "global" "global" use the entire covariance matrix to compute the W matrix "local" use local covariance (by radius) to compute the W matrix - radius_um: None or float, default: None + radius_um : None or float, default : None Used for mode = "local" to get the neighborhood - apply_mean: bool, default: False + apply_mean : bool, default : False Substract or not the mean matrix M before the dot product with W. - int_scale : None or float, default: None + int_scale : None or float, default : None Apply a scaling factor to fit the integer range. This is used when the dtype is an integer, so that the output is scaled. 
For example, a value of `int_scale=200` will scale the trace values to a standard deviation of 200. - eps : float or None, default: None + eps : float or None, default : None Small epsilon to regularize SVD. If None, eps is defaulted to 1e-8. If the data is float type and scaled down to very small values, then the eps is automatically set to a small fraction (1e-3) of the median of the squared data. - W : 2d np.array or None, default: None + W : 2d np.array or None, default : None Pre-computed whitening matrix - M : 1d np.array or None, default: None + M : 1d np.array or None, default : None Pre-computed means. M can be None when previously computed with apply_mean=False **random_chunk_kwargs : Keyword arguments for `spikeinterface.core.get_random_data_chunk()` function Returns ------- - whitened_recording: WhitenRecording + whitened_recording : WhitenRecording The whitened recording extractor """ @@ -147,9 +147,9 @@ def compute_whitening_matrix(recording, mode, random_chunk_kwargs, apply_mean, r Keyword arguments for get_random_data_chunks() apply_mean : bool If True, the mean is removed prior to computing the covariance - radius_um : float or None, default: None + radius_um : float or None, default : None Used for mode = "local" to get the neighborhood - eps : float or None, default: None + eps : float or None, default : None Small epsilon to regularize SVD. If None, the default is set to 1e-8, but if the data is float type and scaled down to very small values, eps is automatically set to a small fraction (1e-3) of the median of the squared data. diff --git a/src/spikeinterface/sorters/basesorter.py b/src/spikeinterface/sorters/basesorter.py index 4cef5e9966..cdd5bc1abb 100644 --- a/src/spikeinterface/sorters/basesorter.py +++ b/src/spikeinterface/sorters/basesorter.py @@ -349,7 +349,7 @@ def check_compiled(cls): Returns ------- - is_compiled: bool + is_compiled : bool Boolean indicating if a bash command for cls.compiled_name exists or not """ if cls.compiled_name is None: diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py index d96768da4f..d72e7abff2 100644 --- a/src/spikeinterface/sorters/launcher.py +++ b/src/spikeinterface/sorters/launcher.py @@ -62,20 +62,20 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal Parameters ---------- - job_list: list of dict + job_list : list of dict A list of dicts that are propagated to run_sorter(...) - engine: str "loop", "joblib", "dask", "slurm" + engine : str "loop", "joblib", "dask", "slurm" The engine to run the list. - * "loop": a simple loop. This engine is - engine_kwargs: dict + * "loop" : a simple loop. This engine is + engine_kwargs : dict - return_output: bool, dfault False + return_output : bool, default : False Return the sortings or None. This also overwrites kwargs in run_sorter(with_sorting=True/False) Returns ------- - sortings: None or list of sorting + sortings : None or list of sorting With engine="loop" or "joblib" you can optionally get the list of sorting results directly if return_output=True. @@ -227,30 +227,30 @@ def run_sorter_by_property( Parameters ---------- - sorter_name: str + sorter_name : str The sorter name - recording: BaseRecording + recording : BaseRecording The recording to be sorted - grouping_property: object + grouping_property : object Property to split by before sorting - folder: str | Path + folder : str | Path The working directory.
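The run_sorter_jobs signature documented above can be exercised as follows; a sketch, where the sorter name and output folders are illustrative:

    import spikeinterface.full as si
    from spikeinterface.sorters import run_sorter_jobs

    rec0 = si.generate_recording(num_channels=4, durations=[10.0])
    rec1 = si.generate_recording(num_channels=4, durations=[10.0])

    # each dict holds the keyword arguments of one run_sorter(...) call
    job_list = [
        dict(sorter_name="spykingcircus2", recording=rec0, folder="job0"),
        dict(sorter_name="spykingcircus2", recording=rec1, folder="job1"),
    ]
    sortings = run_sorter_jobs(job_list, engine="loop", return_output=True)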
- mode_if_folder_exists: bool or None, default: None + mode_if_folder_exists : bool or None, default : None Must be None. This is deprecated. If not None then a warning is raised. Will be removed in next release. - engine: "loop" | "joblib" | "dask", default: "loop" + engine : "loop" | "joblib" | "dask", default : "loop" Which engine to use to run sorter. - engine_kwargs: dict + engine_kwargs : dict This contains kwargs specific to the launcher engine: * "loop" : no kwargs * "joblib" : {"n_jobs" : } number of processes * "dask" : {"client":} the dask client for submitting task - verbose: bool, default: False + verbose : bool, default : False Controls sorter verboseness - docker_image: None or str, default: None + docker_image : None or str, default : None If str run the sorter inside a container (docker) using the docker package - **sorter_params: keyword args + **sorter_params : keyword args Spike sorter specific arguments (they can be retrieved with `get_default_sorter_params(sorter_name_or_class)`) Returns diff --git a/src/spikeinterface/sorters/runsorter.py b/src/spikeinterface/sorters/runsorter.py index b50ddb0a79..eb5fbe10e5 100644 --- a/src/spikeinterface/sorters/runsorter.py +++ b/src/spikeinterface/sorters/runsorter.py @@ -60,53 +60,53 @@ _common_param_doc = """ Parameters ---------- - sorter_name: str + sorter_name : str The sorter name - recording: RecordingExtractor + recording : RecordingExtractor The recording extractor to be spike sorted - folder: str or Path + folder : str or Path Path to output folder - remove_existing_folder: bool + remove_existing_folder : bool If True and folder exists then delete. - delete_output_folder: bool, default: False + delete_output_folder : bool, default : False If True, output folder is deleted - verbose: bool, default: False + verbose : bool, default : False If True, output is verbose - raise_error: bool, default: True + raise_error : bool, default : True If True, an error is raised if spike sorting fails If False, the process continues and the error is logged in the log file. - docker_image: bool or str, default: False + docker_image : bool or str, default : False If True, pull the default docker container for the sorter and run the sorter in that container using docker. Use a str to specify a non-default container. If that container is not local it will be pulled from docker hub. If False, the sorter is run locally - singularity_image: bool or str, default: False + singularity_image : bool or str, default : False If True, pull the default docker container for the sorter and run the sorter in that container using singularity. Use a str to specify a non-default container. If that container is not local it will be pulled from Docker Hub.
If False, the sorter is run locally - with_output: bool, default: True + with_output : bool, default : True If True, the output Sorting is returned as a Sorting - delete_container_files: bool, default: True + delete_container_files : bool, default : True If True, the container temporary files are deleted after the sorting is done - extra_requirements: list, default: None + extra_requirements : list, default : None List of extra requirements to install in the container - installation_mode: "auto" | "pypi" | "github" | "folder" | "dev" | "no-install", default: "auto" + installation_mode : "auto" | "pypi" | "github" | "folder" | "dev" | "no-install", default : "auto" How spikeinterface is installed in the container: - * "auto": if host installation is a pip release then use "github" with tag + * "auto" : if host installation is a pip release then use "github" with tag if host installation is DEV_MODE=True then use "dev" - * "pypi": use pypi with pip install spikeinterface - * "github": use github with `pip install git+https` - * "folder": mount a folder in container and install from this one. + * "pypi" : use pypi with pip install spikeinterface + * "github" : use github with `pip install git+https` + * "folder" : mount a folder in container and install from this one. So the version in the container is a different spikeinterface version from host, useful for cross checks - * "dev": same as "folder", but the folder is the spikeinterface.__file__ to ensure same version as host - * "no-install": do not install spikeinterface in the container because it is already installed - spikeinterface_version: str, default: None + * "dev" : same as "folder", but the folder is the spikeinterface.__file__ to ensure same version as host + * "no-install" : do not install spikeinterface in the container because it is already installed + spikeinterface_version : str, default : None The spikeinterface version to install in the container. If None, the current version is used - spikeinterface_folder_source: Path or None, default: None + spikeinterface_folder_source : Path or None, default : None In case of installation_mode="folder", the spikeinterface folder source to use to install in the container - output_folder: None, default: None + output_folder : None, default : None Do not use. Deprecated output function to be removed in 0.103. - **sorter_params: keyword args + **sorter_params : keyword args Spike sorter specific arguments (they can be retrieved with `get_default_sorter_params(sorter_name_or_class)`) Returns @@ -205,26 +205,26 @@ def run_sorter_local( Parameters ---------- - sorter_name: str + sorter_name : str The sorter name - recording: RecordingExtractor + recording : RecordingExtractor The recording extractor to be spike sorted - folder: str or Path + folder : str or Path Path to output folder. If None, a folder is created in the current directory - remove_existing_folder: bool, default: True + remove_existing_folder : bool, default : True If True and output_folder exists yet then delete - delete_output_folder: bool, default: False + delete_output_folder : bool, default : False If True, output folder is deleted - verbose: bool, default: False + verbose : bool, default : False If True, output is verbose - raise_error: bool, default: True + raise_error : bool, default : True If True, an error is raised if spike sorting fails. 
If False, the process continues and the error is logged in the log file - with_output: bool, default: True + with_output : bool, default : True If True, the output Sorting is returned as a Sorting - output_folder: None, default: None + output_folder : None, default : None Do not use. Deprecated output function to be removed in 0.103. - **sorter_params: keyword args + **sorter_params : keyword args """ if isinstance(recording, list): raise Exception("If you want to run several sorters/recordings use run_sorter_jobs(...)") @@ -286,46 +286,46 @@ def run_sorter_container( Parameters ---------- - sorter_name: str + sorter_name : str The sorter name - recording: BaseRecording + recording : BaseRecording The recording extractor to be spike sorted - mode: str - The container mode: "docker" or "singularity" - container_image: str, default: None + mode : str + The container mode: "docker" or "singularity" + container_image : str, default : None The container image name and tag. If None, the default container image is used - output_folder: str, default: None + output_folder : str, default : None Path to output folder - remove_existing_folder: bool, default: True + remove_existing_folder : bool, default : True If True and output_folder exists yet then delete - delete_output_folder: bool, default: False + delete_output_folder : bool, default : False If True, output folder is deleted - verbose: bool, default: False + verbose : bool, default : False If True, output is verbose - raise_error: bool, default: True + raise_error : bool, default : True If True, an error is raised if spike sorting fails - with_output: bool, default: True + with_output : bool, default : True If True, the output Sorting is returned as a Sorting - delete_container_files: bool, default: True + delete_container_files : bool, default : True If True, the container temporary files are deleted after the sorting is done - extra_requirements: list, default: None + extra_requirements : list, default : None List of extra requirements to install in the container - installation_mode: "auto" | "pypi" | "github" | "folder" | "dev" | "no-install", default: "auto" + installation_mode : "auto" | "pypi" | "github" | "folder" | "dev" | "no-install", default : "auto" How spikeinterface is installed in the container: - * "auto": if host installation is a pip release then use "github" with tag + * "auto" : if host installation is a pip release then use "github" with tag if host installation is DEV_MODE=True then use "dev" - * "pypi": use pypi with pip install spikeinterface - * "github": use github with `pip install git+https` - * "folder": mount a folder in container and install from this one. + * "pypi" : use pypi with pip install spikeinterface + * "github" : use github with `pip install git+https` + * "folder" : mount a folder in container and install from this one. So the version in the container is a different spikeinterface version from host, useful for cross checks - * "dev": same as "folder", but the folder is the spikeinterface.__file__ to ensure same version as host - * "no-install": do not install spikeinterface in the container because it is already installed - spikeinterface_version: str, default: None + * "dev" : same as "folder", but the folder is the spikeinterface.__file__ to ensure same version as host + * "no-install" : do not install spikeinterface in the container because it is already installed + spikeinterface_version : str, default : None The spikeinterface version to install in the container.
If None, the current version is used - spikeinterface_folder_source: Path or None, default: None + spikeinterface_folder_source : Path or None, default : None In case of installation_mode="folder", the spikeinterface folder source to use to install in the container - **sorter_params: keyword args for the sorter + **sorter_params : keyword args for the sorter """ @@ -646,11 +646,11 @@ def read_sorter_folder(folder, register_recording=True, sorting_info=True, raise Parameters ---------- - folder: Pth or str + folder : Path or str The sorter folder - register_recording: bool, default: True + register_recording : bool, default : True Attach recording (when json or pickle) to the sorting - sorting_info: bool, default: True + sorting_info : bool, default : True Attach sorting info to the sorting. """ folder = Path(folder) diff --git a/src/spikeinterface/sorters/sorterlist.py b/src/spikeinterface/sorters/sorterlist.py index 6c437be09e..105a536617 100644 --- a/src/spikeinterface/sorters/sorterlist.py +++ b/src/spikeinterface/sorters/sorterlist.py @@ -81,12 +81,12 @@ def get_default_sorter_params(sorter_name_or_class): Parameters ---------- - sorter_name_or_class: str or SorterClass + sorter_name_or_class : str or SorterClass The sorter to retrieve default parameters from. Returns ------- - default_params: dict + default_params : dict Dictionary with default params for the specified sorter. """ @@ -105,12 +105,12 @@ def get_sorter_params_description(sorter_name_or_class): Parameters ---------- - sorter_name_or_class: str or SorterClass + sorter_name_or_class : str or SorterClass The sorter to retrieve parameters description from. Returns ------- - params_description: dict + params_description : dict Dictionary with parameter description """ @@ -129,12 +129,12 @@ def get_sorter_description(sorter_name_or_class): Parameters ---------- - sorter_name_or_class: str or SorterClass + sorter_name_or_class : str or SorterClass The sorter to retrieve description from. Returns ------- - params_description: dict + params_description : dict Dictionary with parameter description.
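Taken together, the sorterlist helpers and read_sorter_folder support a small inspect-then-reload workflow. A sketch (the sorter name and folder path are illustrative):

    from spikeinterface.sorters import (
        get_default_sorter_params,
        get_sorter_params_description,
        read_sorter_folder,
    )

    # inspect what a sorter accepts before launching it
    params = get_default_sorter_params("mountainsort5")
    descriptions = get_sorter_params_description("mountainsort5")
    for name, default in params.items():
        print(f"{name} (default {default}): {descriptions.get(name, '')}")

    # reload a finished run from its folder, re-attaching the recording and sorting info
    sorting = read_sorter_folder("sorting_output", register_recording=True, sorting_info=True)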
""" diff --git a/src/spikeinterface/widgets/all_amplitudes_distributions.py b/src/spikeinterface/widgets/all_amplitudes_distributions.py index 59a69640da..8fe310d986 100644 --- a/src/spikeinterface/widgets/all_amplitudes_distributions.py +++ b/src/spikeinterface/widgets/all_amplitudes_distributions.py @@ -15,12 +15,12 @@ class AllAmplitudesDistributionsWidget(BaseWidget): Parameters ---------- - sorting_analyzer: SortingAnalyzer + sorting_analyzer : SortingAnalyzer The SortingAnalyzer - unit_ids: list + unit_ids : list List of unit ids, default None - unit_colors: None or dict - Dict of colors with key: unit, value: color, default None + unit_colors : None or dict + Dict of colors with key : unit, value : color, default None """ def __init__( diff --git a/src/spikeinterface/widgets/amplitudes.py b/src/spikeinterface/widgets/amplitudes.py index efbf6f3f32..343bf30cf9 100644 --- a/src/spikeinterface/widgets/amplitudes.py +++ b/src/spikeinterface/widgets/amplitudes.py @@ -17,22 +17,22 @@ class AmplitudesWidget(BaseWidget): ---------- sorting_analyzer : SortingAnalyzer The input waveform extractor - unit_ids : list or None, default: None + unit_ids : list or None, default : None List of unit ids - segment_index : int or None, default: None + segment_index : int or None, default : None The segment index (or None if mono-segment) - max_spikes_per_unit : int or None, default: None + max_spikes_per_unit : int or None, default : None Number of max spikes per unit to display. Use None for all spikes - hide_unit_selector : bool, default: False + hide_unit_selector : bool, default : False If True the unit selector is not displayed (sortingview backend) - plot_histogram : bool, default: False + plot_histogram : bool, default : False If True, an histogram of the amplitudes is plotted on the right axis (matplotlib backend) - bins : int or None, default: None + bins : int or None, default : None If plot_histogram is True, the number of bins for the amplitude histogram. If None this is automatically adjusted - plot_legend : bool, default: True + plot_legend : bool, default : True True includes legend in plot """ diff --git a/src/spikeinterface/widgets/collision.py b/src/spikeinterface/widgets/collision.py index 34f65a2f89..e13a0c04f9 100644 --- a/src/spikeinterface/widgets/collision.py +++ b/src/spikeinterface/widgets/collision.py @@ -11,24 +11,24 @@ class ComparisonCollisionBySimilarityWidget(BaseWidget): Parameters ---------- - comp: CollisionGTComparison + comp : CollisionGTComparison The collision ground truth comparison object - templates: array + templates : array template of units - mode: "heatmap" or "lines" + mode : "heatmap" or "lines" to see collision curves for every pairs ("heatmap") or as lines averaged over pairs. - similarity_bins: array + similarity_bins : array if mode is "lines", the bins used to average the pairs - cmap: string + cmap : string colormap used to show averages if mode is "lines" - metric: "cosine_similarity" + metric : "cosine_similarity" metric for ordering - good_only: True + good_only : True keep only the pairs with a non zero accuracy (found templates) - min_accuracy: float + min_accuracy : float If good only, the minimum accuracy every cell should have, individually, to be considered in a putative pair - unit_ids: list + unit_ids : list List of considered units """ @@ -180,19 +180,19 @@ class StudyComparisonCollisionBySimilarityWidget(BaseWidget): Parameters ---------- - study: CollisionGTStudy + study : CollisionGTStudy The collision study object. 
- case_keys: list or None + case_keys : list or None A selection of cases to plot, if None, then all. - metric: "cosine_similarity" + metric : "cosine_similarity" metric for ordering - similarity_bins: array + similarity_bins : array if mode is "lines", the bins used to average the pairs - cmap: string + cmap : string colormap used to show averages if mode is "lines" - good_only: False + good_only : False keep only the pairs with a non zero accuracy (found templates) - min_accuracy: float + min_accuracy : float If good only, the minimum accuracy every cell should have, individually, to be considered in a putative pair """ diff --git a/src/spikeinterface/widgets/comparison.py b/src/spikeinterface/widgets/comparison.py index 58dd6f166d..9650b35505 100644 --- a/src/spikeinterface/widgets/comparison.py +++ b/src/spikeinterface/widgets/comparison.py @@ -11,11 +11,11 @@ class ConfusionMatrixWidget(BaseWidget): Parameters ---------- - gt_comparison: GroundTruthComparison + gt_comparison : GroundTruthComparison The ground truth sorting comparison object - count_text: bool + count_text : bool If True counts are displayed as text - unit_ticks: bool + unit_ticks : bool If True unit tick labels are displayed """ @@ -85,15 +85,15 @@ class AgreementMatrixWidget(BaseWidget): Parameters ---------- - sorting_comparison: GroundTruthComparison or SymmetricSortingComparison + sorting_comparison : GroundTruthComparison or SymmetricSortingComparison The sorting comparison object. Can optionally be symmetric if given a SymmetricSortingComparison - ordered: bool, default: True + ordered : bool, default : True Order units with best agreement scores. If True, agreement scores can be seen along a diagonal - count_text: bool, default: True + count_text : bool, default : True If True counts are displayed as text - unit_ticks: bool, default: True + unit_ticks : bool, default : True If True unit tick labels are displayed """ diff --git a/src/spikeinterface/widgets/crosscorrelograms.py b/src/spikeinterface/widgets/crosscorrelograms.py index e70a5775e6..2fcdfcd509 100644 --- a/src/spikeinterface/widgets/crosscorrelograms.py +++ b/src/spikeinterface/widgets/crosscorrelograms.py @@ -17,20 +17,20 @@ class CrossCorrelogramsWidget(BaseWidget): ---------- sorting_analyzer_or_sorting : SortingAnalyzer or BaseSorting The object to compute/get crosscorrelograms from - unit_ids list or None, default: None + unit_ids : list or None, default : None List of unit ids - min_similarity_for_correlograms : float, default: 0.2 + min_similarity_for_correlograms : float, default : 0.2 For sortingview backend. Threshold for computing pair-wise cross-correlograms. If template similarity between two units is below this threshold, the cross-correlogram is not displayed - window_ms : float, default: 100.0 + window_ms : float, default : 100.0 Window for CCGs in ms. If correlograms are already computed (e.g. with SortingAnalyzer), this argument is ignored - bin_ms : float, default: 1.0 + bin_ms : float, default : 1.0 Bin size in ms. If correlograms are already computed (e.g.
with SortingAnalyzer), this argument is ignored - hide_unit_selector : bool, default: False + hide_unit_selector : bool, default : False For sortingview backend, if True the unit selector is not displayed - unit_colors: dict or None, default: None + unit_colors : dict or None, default : None If given, a dictionary with unit ids as keys and colors as values """ diff --git a/src/spikeinterface/widgets/gtstudy.py b/src/spikeinterface/widgets/gtstudy.py index 3512f31e84..c26417a9fa 100644 --- a/src/spikeinterface/widgets/gtstudy.py +++ b/src/spikeinterface/widgets/gtstudy.py @@ -12,9 +12,9 @@ class StudyRunTimesWidget(BaseWidget): Parameters ---------- - study: GroundTruthStudy + study : GroundTruthStudy A study object. - case_keys: list or None + case_keys : list or None A selection of cases to plot, if None, then all. """ @@ -61,9 +61,9 @@ class StudyUnitCountsWidget(BaseWidget): Parameters ---------- - study: GroundTruthStudy + study : GroundTruthStudy A study object. - case_keys: list or None + case_keys : list or None A selection of cases to plot, if None, then all. """ @@ -133,17 +133,17 @@ class StudyPerformances(BaseWidget): Parameters ---------- - study: GroundTruthStudy + study : GroundTruthStudy A study object. - mode: "ordered" | "snr" | "swarm", default: "ordered" + mode : "ordered" | "snr" | "swarm", default : "ordered" Which plot mode to use: * "ordered": plot performance metrics vs unit indices ordered by decreasing accuracy * "snr": plot performance metrics vs snr * "swarm": plot performance metrics as a swarm plot (see seaborn.swarmplot for details) - performance_names: list or tuple, default: ("accuracy", "precision", "recall") + performance_names : list or tuple, default : ("accuracy", "precision", "recall") Which performances to plot ("accuracy", "precision", "recall") - case_keys: list or None + case_keys : list or None A selection of cases to plot, if None, then all. """ @@ -234,11 +234,11 @@ class StudyAgreementMatrix(BaseWidget): Parameters ---------- - study: GroundTruthStudy + study : GroundTruthStudy A study object. - case_keys: list or None + case_keys : list or None A selection of cases to plot, if None, then all. - ordered: bool + ordered : bool Order units with best agreement scores. This enables seeing agreement along a diagonal. """ @@ -307,9 +307,9 @@ class StudySummary(BaseWidget): Parameters ---------- - study: GroundTruthStudy + study : GroundTruthStudy A study object. - case_keys: list or None, default: None + case_keys : list or None, default : None A selection of cases to plot, if None, then all.
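For the comparison and study widgets, the input objects come from the comparison module. A sketch of the agreement-matrix path (comparing a ground-truth sorting against itself just to show the call pattern; swap in a real sorter output in practice):

    import spikeinterface.full as si
    import spikeinterface.comparison as sc
    import spikeinterface.widgets as sw

    recording, gt_sorting = si.generate_ground_truth_recording(durations=[10.0])
    # a self-comparison yields a perfect diagonal
    cmp = sc.compare_sorter_to_ground_truth(gt_sorting, gt_sorting)
    sw.plot_agreement_matrix(cmp, ordered=True)
    sw.plot_confusion_matrix(cmp)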
""" diff --git a/src/spikeinterface/widgets/isi_distribution.py b/src/spikeinterface/widgets/isi_distribution.py index 33c39da103..6ffe280e83 100644 --- a/src/spikeinterface/widgets/isi_distribution.py +++ b/src/spikeinterface/widgets/isi_distribution.py @@ -13,13 +13,13 @@ class ISIDistributionWidget(BaseWidget): Parameters ---------- - sorting: SortingExtractor + sorting : SortingExtractor The sorting extractor object - unit_ids: list + unit_ids : list List of unit ids - bins_ms: int + bins_ms : int Bin size in ms - window_ms: float + window_ms : float Window size in ms """ diff --git a/src/spikeinterface/widgets/motion.py b/src/spikeinterface/widgets/motion.py index 9d64c89e46..49f4ff4e94 100644 --- a/src/spikeinterface/widgets/motion.py +++ b/src/spikeinterface/widgets/motion.py @@ -11,25 +11,25 @@ class MotionWidget(BaseWidget): Parameters ---------- - motion_info: dict + motion_info : dict The motion info return by correct_motion() or load back with load_motion_info() - recording : RecordingExtractor, default: None + recording : RecordingExtractor, default : None The recording extractor object (only used to get "real" times) - sampling_frequency : float, default: None + sampling_frequency : float, default : None The sampling frequency (needed if recording is None) - depth_lim : tuple or None, default: None + depth_lim : tuple or None, default : None The min and max depth to display, if None (min and max of the recording) - motion_lim : tuple or None, default: None + motion_lim : tuple or None, default : None The min and max motion to display, if None (min and max of the motion) - color_amplitude : bool, default: False + color_amplitude : bool, default : False If True, the color of the scatter points is the amplitude of the peaks - scatter_decimate : int, default: None + scatter_decimate : int, default : None If > 1, the scatter points are decimated - amplitude_cmap : str, default: "inferno" + amplitude_cmap : str, default : "inferno" The colormap to use for the amplitude - amplitude_clim : tuple or None, default: None + amplitude_clim : tuple or None, default : None The min and max amplitude to display, if None (min and max of the amplitudes) - amplitude_alpha : float, default: 1 + amplitude_alpha : float, default : 1 The alpha of the scatter points """ diff --git a/src/spikeinterface/widgets/multicomparison.py b/src/spikeinterface/widgets/multicomparison.py index 2d4a22a2b3..a86bc58d50 100644 --- a/src/spikeinterface/widgets/multicomparison.py +++ b/src/spikeinterface/widgets/multicomparison.py @@ -13,17 +13,17 @@ class MultiCompGraphWidget(BaseWidget): Parameters ---------- - multi_comparison: BaseMultiComparison + multi_comparison : BaseMultiComparison The multi comparison object - draw_labels: bool, default: False + draw_labels : bool, default : False If True unit labels are shown - node_cmap: matplotlib colormap, default: "viridis" + node_cmap : matplotlib colormap, default : "viridis" The colormap to be used for the nodes - edge_cmap: matplotlib colormap, default: "hot" + edge_cmap : matplotlib colormap, default : "hot" The colormap to be used for the edges - alpha_edges: float, default: 0.5 + alpha_edges : float, default : 0.5 Alpha value for edges - colorbar: bool, default: False + colorbar : bool, default : False If True a colorbar for the edges is plotted """ @@ -119,15 +119,15 @@ class MultiCompGlobalAgreementWidget(BaseWidget): Parameters ---------- - multi_comparison: BaseMultiComparison + multi_comparison : BaseMultiComparison The multi comparison object - plot_type: 
"pie" | "bar", default: "pie" + plot_type : "pie" | "bar", default : "pie" The plot type - cmap: matplotlib colormap, default: "YlOrRd" + cmap : matplotlib colormap, default : "YlOrRd" The colormap to be used for the nodes - fontsize: int, default: 9 + fontsize : int, default : 9 The text fontsize - show_legend: bool, default: True + show_legend : bool, default : True If True a legend is shown """ @@ -197,15 +197,15 @@ class MultiCompAgreementBySorterWidget(BaseWidget): Parameters ---------- - multi_comparison: BaseMultiComparison + multi_comparison : BaseMultiComparison The multi comparison object - plot_type: "pie" | "bar", default: "pie + plot_type : "pie" | "bar", default : "pie The plot type - cmap: matplotlib colormap, default: "Reds" + cmap : matplotlib colormap, default : "Reds" The colormap to be used for the nodes - fontsize: int, default: 9 + fontsize : int, default : 9 The text fontsize - show_legend: bool + show_legend : bool Show the legend in the last axes """ diff --git a/src/spikeinterface/widgets/peak_activity.py b/src/spikeinterface/widgets/peak_activity.py index 2339166bfb..0121e40cf3 100644 --- a/src/spikeinterface/widgets/peak_activity.py +++ b/src/spikeinterface/widgets/peak_activity.py @@ -14,21 +14,21 @@ class PeakActivityMapWidget(BaseWidget): Parameters ---------- - recording: RecordingExtractor + recording : RecordingExtractor The recording extractor object. - peaks: None or numpy array + peaks : None or numpy array Optionally can give already detected peaks to avoid multiple computation. - detect_peaks_kwargs: None or dict, default: None + detect_peaks_kwargs : None or dict, default : None If peaks is None here the kwargs for detect_peak function. - bin_duration_s: None or float, default: None + bin_duration_s : None or float, default : None If None then static image If not None then it is an animation per bin. 
- with_contact_color: bool, default: True + with_contact_color : bool, default : True Plot rates with contact colors - with_interpolated_map: bool, default: True + with_interpolated_map : bool, default : True Plot rates with interpolated map - with_channel_ids: bool, default: False + with_channel_ids : bool, default : False Add channel ids text on the probe diff --git a/src/spikeinterface/widgets/probe_map.py b/src/spikeinterface/widgets/probe_map.py index baf9ee86b8..eb771f5a73 100644 --- a/src/spikeinterface/widgets/probe_map.py +++ b/src/spikeinterface/widgets/probe_map.py @@ -13,13 +13,13 @@ class ProbeMapWidget(BaseWidget): Parameters ---------- - recording: RecordingExtractor + recording : RecordingExtractor The recording extractor object - color_channels: list or matplotlib color + color_channels : list or matplotlib color List of colors to be associated with each channel_id, if only one color is present all channels will take the specified color - with_channel_ids: bool False default + with_channel_ids : bool, default : False Add channel ids text on the probe - **plot_probe_kwargs: keyword arguments for probeinterface.plotting.plot_probe_group() function + **plot_probe_kwargs : keyword arguments for probeinterface.plotting.plot_probe_group() function """ diff --git a/src/spikeinterface/widgets/quality_metrics.py b/src/spikeinterface/widgets/quality_metrics.py index 3f9ee549be..8d7a256531 100644 --- a/src/spikeinterface/widgets/quality_metrics.py +++ b/src/spikeinterface/widgets/quality_metrics.py @@ -12,15 +12,15 @@ class QualityMetricsWidget(MetricsBaseWidget): ---------- sorting_analyzer : SortingAnalyzer The object to get quality metrics from - unit_ids: list or None, default: None + unit_ids : list or None, default : None List of unit ids - include_metrics: list or None, default: None + include_metrics : list or None, default : None If given, a list of quality metrics to include - skip_metrics: list or None, default: None + skip_metrics : list or None, default : None If given, a list of quality metrics to skip - unit_colors : dict or None, default: None + unit_colors : dict or None, default : None If given, a dictionary with unit ids as keys and colors as values - hide_unit_selector : bool, default: False + hide_unit_selector : bool, default : False For sortingview backend, if True the unit selector is not displayed """ diff --git a/src/spikeinterface/widgets/rasters.py b/src/spikeinterface/widgets/rasters.py index 957eaadcc9..ca579c975f 100644 --- a/src/spikeinterface/widgets/rasters.py +++ b/src/spikeinterface/widgets/rasters.py @@ -12,15 +12,15 @@ class RasterWidget(BaseWidget): Parameters ---------- - sorting: SortingExtractor + sorting : SortingExtractor The sorting extractor object - segment_index: None or int + segment_index : None or int The segment index.
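A raster plot from the widget above takes a plain sorting object; a minimal sketch on synthetic data:

    import spikeinterface.full as si
    import spikeinterface.widgets as sw

    sorting = si.generate_sorting(num_units=5, durations=[10.0])
    sw.plot_rasters(sorting, time_range=[0, 10])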
- unit_ids: list + unit_ids : list List of unit ids - time_range: list + time_range : list List with start time and end time - color: matplotlib color + color : matplotlib color The color to be used """ diff --git a/src/spikeinterface/widgets/sorting_summary.py b/src/spikeinterface/widgets/sorting_summary.py index 24b4ca8022..00a05e445b 100644 --- a/src/spikeinterface/widgets/sorting_summary.py +++ b/src/spikeinterface/widgets/sorting_summary.py @@ -24,25 +24,25 @@ class SortingSummaryWidget(BaseWidget): ---------- sorting_analyzer : SortingAnalyzer The SortingAnalyzer object - unit_ids : list or None, default: None + unit_ids : list or None, default : None List of unit ids - sparsity : ChannelSparsity or None, default: None + sparsity : ChannelSparsity or None, default : None Optional ChannelSparsity to apply If SortingAnalyzer is already sparse, the argument is ignored - max_amplitudes_per_unit : int or None, default: None + max_amplitudes_per_unit : int or None, default : None Maximum number of spikes per unit for plotting amplitudes. If None, all spikes are plotted - min_similarity_for_correlograms : float, default: 0.2 + min_similarity_for_correlograms : float, default : 0.2 Threshold for computing pair-wise cross-correlograms. If template similarity between two units is below this threshold, the cross-correlogram is not computed (sortingview backend) - curation : bool, default: False + curation : bool, default : False If True, manual curation is enabled (sortingview backend) - label_choices : list or None, default: None + label_choices : list or None, default : None List of labels to be added to the curation table (sortingview backend) - unit_table_properties : list or None, default: None + unit_table_properties : list or None, default : None List of properties to be added to the unit table (sortingview backend) """ diff --git a/src/spikeinterface/widgets/spike_locations.py b/src/spikeinterface/widgets/spike_locations.py index 94c9def630..7f7b0190c8 100644 --- a/src/spikeinterface/widgets/spike_locations.py +++ b/src/spikeinterface/widgets/spike_locations.py @@ -15,24 +15,24 @@ class SpikeLocationsWidget(BaseWidget): ---------- sorting_analyzer : SortingAnalyzer The object to get spike locations from - unit_ids : list or None, default: None + unit_ids : list or None, default : None List of unit ids - segment_index : int or None, default: None + segment_index : int or None, default : None The segment index (or None if mono-segment) - max_spikes_per_unit : int or None, default: 500 + max_spikes_per_unit : int or None, default : 500 Number of max spikes per unit to display. Use None for all spikes. - with_channel_ids : bool, default: False + with_channel_ids : bool, default : False Add channel ids text on the probe - unit_colors : dict or None, default: None + unit_colors : dict or None, default : None If given, a dictionary with unit ids as keys and colors as values - hide_unit_selector : bool, default: False + hide_unit_selector : bool, default : False For sortingview backend, if True the unit selector is not displayed - plot_all_units : bool, default: True + plot_all_units : bool, default : True If True, all units are plotted. 
The unselected ones (not in unit_ids), are plotted in grey (matplotlib backend) - plot_legend : bool, default: False + plot_legend : bool, default : False If True, the legend is plotted (matplotlib backend) - hide_axis : bool, default: False + hide_axis : bool, default : False If True, the axis is set to off (matplotlib backend) """ diff --git a/src/spikeinterface/widgets/spikes_on_traces.py b/src/spikeinterface/widgets/spikes_on_traces.py index 8b69c5b7bf..0e257beeda 100644 --- a/src/spikeinterface/widgets/spikes_on_traces.py +++ b/src/spikeinterface/widgets/spikes_on_traces.py @@ -21,44 +21,44 @@ class SpikesOnTracesWidget(BaseWidget): ---------- sorting_analyzer : SortingAnalyzer The SortingAnalyzer - channel_ids : list or None, default: None + channel_ids : list or None, default : None The channel ids to display - unit_ids : list or None, default: None + unit_ids : list or None, default : None List of unit ids - order_channel_by_depth : bool, default: False + order_channel_by_depth : bool, default : False If true orders channel by depth - time_range: list or None, default: None + time_range : list or None, default : None List with start time and end time in seconds - sparsity : ChannelSparsity or None, default: None + sparsity : ChannelSparsity or None, default : None Optional ChannelSparsity to apply If SortingAnalyzer is already sparse, the argument is ignored - unit_colors : dict or None, default: None + unit_colors : dict or None, default : None If given, a dictionary with unit ids as keys and colors as values If None, then the get_unit_colors() is internally used. (matplotlib backend) - mode : "line" | "map" | "auto", default: "auto" + mode : "line" | "map" | "auto", default : "auto" * "line": classical for low channel count * "map": for high channel count use color heat map * "auto": auto switch depending on the channel count ("line" if less than 64 channels, "map" otherwise) - return_scaled : bool, default: False + return_scaled : bool, default : False If True and the recording has scaled traces, it plots the scaled traces - cmap : str, default: "RdBu" + cmap : str, default : "RdBu" matplotlib colormap used in mode "map" - show_channel_ids : bool, default: False + show_channel_ids : bool, default : False Set yticks with channel ids - color_groups : bool, default: False + color_groups : bool, default : False If True groups are plotted with different colors - color : str or None, default: None + color : str or None, default : None The color used to draw the traces - clim : None, tuple or dict, default: None + clim : None, tuple or dict, default : None When mode is "map", this argument controls color limits. 
If dict, keys should be the same as recording keys - scale : float, default: 1 + scale : float, default : 1 Scale factor for the traces - with_colorbar : bool, default: True + with_colorbar : bool, default : True When mode is "map", a colorbar is added - tile_size : int, default: 512 + tile_size : int, default : 512 For sortingview backend, the size of each tile in the rendered image - seconds_per_row : float, default: 0.2 + seconds_per_row : float, default : 0.2 For "map" mode and sortingview backend, seconds to render in each row """ diff --git a/src/spikeinterface/widgets/template_metrics.py b/src/spikeinterface/widgets/template_metrics.py index b80c863e75..ae6c233429 100644 --- a/src/spikeinterface/widgets/template_metrics.py +++ b/src/spikeinterface/widgets/template_metrics.py @@ -12,15 +12,15 @@ class TemplateMetricsWidget(MetricsBaseWidget): ---------- sorting_analyzer : SortingAnalyzer The object to get quality metrics from - unit_ids : list or None, default: None + unit_ids : list or None, default : None List of unit ids - include_metrics : list or None, default: None + include_metrics : list or None, default : None If given, a list of quality metrics to include - skip_metrics : list or None or None, default: None + skip_metrics : list or None, default : None If given, a list of quality metrics to skip - unit_colors : dict or None, default: None + unit_colors : dict or None, default : None If given, a dictionary with unit ids as keys and colors as values - hide_unit_selector : bool, default: False + hide_unit_selector : bool, default : False For sortingview backend, if True the unit selector is not displayed """ diff --git a/src/spikeinterface/widgets/template_similarity.py b/src/spikeinterface/widgets/template_similarity.py index b469d9901f..80f1e82740 100644 --- a/src/spikeinterface/widgets/template_similarity.py +++ b/src/spikeinterface/widgets/template_similarity.py @@ -14,16 +14,16 @@ class TemplateSimilarityWidget(BaseWidget): ---------- sorting_analyzer : SortingAnalyzer The object to get template similarity from - unit_ids : list or None, default: None - List of unit ids default: None + unit_ids : list or None, default : None + List of unit ids - display_diagonal_values : bool, default: False + display_diagonal_values : bool, default : False If False, the diagonal is displayed as zeros. If True, the similarity values (all 1s) are displayed - cmap : matplotlib colormap, default: "viridis" + cmap : matplotlib colormap, default : "viridis" The matplotlib colormap - show_unit_ticks : bool, default: False + show_unit_ticks : bool, default : False If True, ticks display unit ids - show_colorbar : bool, default: True + show_colorbar : bool, default : True If True, color bar is displayed """ diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py index d5bfe8caa4..468edb470b 100644 --- a/src/spikeinterface/widgets/traces.py +++ b/src/spikeinterface/widgets/traces.py @@ -15,50 +15,50 @@ class TracesWidget(BaseWidget): Parameters ---------- - recording: RecordingExtractor, dict, or list + recording : RecordingExtractor, dict, or list The recording extractor object.
If dict (or list) then it is a multi-layer display to compare, for example, different processing steps - segment_index: None or int, default: None + segment_index : None or int, default : None The segment index (required for multi-segment recordings) - channel_ids: list or None, default: None + channel_ids : list or None, default : None The channel ids to display - order_channel_by_depth: bool, default: False + order_channel_by_depth : bool, default : False Reorder channels by depth - time_range: list, tuple or None, default: None + time_range : list, tuple or None, default : None List with start time and end time - mode: "line" | "map" | "auto", default: "auto" + mode : "line" | "map" | "auto", default : "auto" Three possible modes * "line": classical for low channel count * "map": for high channel count use color heat map * "auto": auto switch depending on the channel count ("line" if less than 64 channels, "map" otherwise) - return_scaled: bool, default: False + return_scaled : bool, default : False If True and the recording has scaled traces, it plots the scaled traces - events: np.array | list[np.narray] or None, default: None + events : np.array | list[np.ndarray] or None, default : None Events to display as vertical lines. The numpy arrays can either be of dtype float, with event times in seconds, or a structured array with the "time" field, and optional "duration" and "label" fields. For multi-segment recordings, provide a list of numpy array events, one for each segment. - cmap: matplotlib colormap, default: "RdBu_r" + cmap : matplotlib colormap, default : "RdBu_r" matplotlib colormap used in mode "map" - show_channel_ids: bool, default: False + show_channel_ids : bool, default : False Set yticks with channel ids - color_groups: bool, default: False + color_groups : bool, default : False If True groups are plotted with different colors - color: str or None, default: None + color : str or None, default : None The color used to draw the traces - clim: None, tuple or dict, default: None + clim : None, tuple or dict, default : None When mode is "map", this argument controls color limits.
If dict, keys should be the same as recording keys - scale: float, default: 1 + scale : float, default : 1 Scale factor for the traces - with_colorbar: bool, default: True + with_colorbar : bool, default : True When mode is "map", a colorbar is added - tile_size: int, default: 1500 + tile_size : int, default : 1500 For sortingview backend, the size of each tile in the rendered image - seconds_per_row: float, default: 0.2 + seconds_per_row : float, default : 0.2 For "map" mode and sortingview backend, seconds to render in each row - add_legend : bool, default: True + add_legend : bool, default : True If True adds legend to figures """ diff --git a/src/spikeinterface/widgets/unit_depths.py b/src/spikeinterface/widgets/unit_depths.py index c2e9c06863..5627f766ee 100644 --- a/src/spikeinterface/widgets/unit_depths.py +++ b/src/spikeinterface/widgets/unit_depths.py @@ -18,11 +18,11 @@ class UnitDepthsWidget(BaseWidget): ---------- sorting_analyzer : SortingAnalyzer The SortingAnalyzer object - unit_colors : dict or None, default: None + unit_colors : dict or None, default : None If given, a dictionary with unit ids as keys and colors as values - depth_axis : int, default: 1 + depth_axis : int, default : 1 The dimension of unit_locations that is depth - peak_sign: "neg" | "pos" | "both", default: "neg" + peak_sign : "neg" | "pos" | "both", default : "neg" Sign of peak for amplitudes """ diff --git a/src/spikeinterface/widgets/unit_locations.py b/src/spikeinterface/widgets/unit_locations.py index 3329c2183c..2f5742eca6 100644 --- a/src/spikeinterface/widgets/unit_locations.py +++ b/src/spikeinterface/widgets/unit_locations.py @@ -18,20 +18,20 @@ class UnitLocationsWidget(BaseWidget): ---------- sorting_analyzer : SortingAnalyzer The SortingAnalyzer that must contains "unit_locations" extension - unit_ids : list or None, default: None + unit_ids : list or None, default : None List of unit ids - with_channel_ids : bool, default: False + with_channel_ids : bool, default : False Add channel ids text on the probe - unit_colors : dict or None, default: None + unit_colors : dict or None, default : None If given, a dictionary with unit ids as keys and colors as values - hide_unit_selector : bool, default: False + hide_unit_selector : bool, default : False If True, the unit selector is not displayed (sortingview backend) - plot_all_units : bool, default: True + plot_all_units : bool, default : True If True, all units are plotted. The unselected ones (not in unit_ids), are plotted in grey (matplotlib backend) - plot_legend : bool, default: False + plot_legend : bool, default : False If True, the legend is plotted (matplotlib backend) - hide_axis : bool, default: False + hide_axis : bool, default : False If True, the axis is set to off (matplotlib backend) """ diff --git a/src/spikeinterface/widgets/unit_presence.py b/src/spikeinterface/widgets/unit_presence.py index 746868b89d..5a9d43af11 100644 --- a/src/spikeinterface/widgets/unit_presence.py +++ b/src/spikeinterface/widgets/unit_presence.py @@ -11,15 +11,15 @@ class UnitPresenceWidget(BaseWidget): Parameters ---------- - sorting: SortingExtractor + sorting : SortingExtractor The sorting extractor object - segment_index: None or int + segment_index : None or int The segment index. 
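The traces options listed above compose naturally; a sketch of a heat-map view of one second of data (the synthetic recording is illustrative):

    import spikeinterface.full as si
    import spikeinterface.widgets as sw

    recording = si.generate_recording(num_channels=64, durations=[10.0])
    sw.plot_traces(
        recording,
        mode="map",  # color heat map, suited to high channel counts
        time_range=[0.0, 1.0],
        order_channel_by_depth=True,
        show_channel_ids=False,
    )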
- time_range: list or None, default: None + time_range : list or None, default : None List with start time and end time - bin_duration_s: float, default: 0.5 + bin_duration_s : float, default : 0.5 Bin size (in seconds) for the heat map time axis - smooth_sigma: float, default: 4.5 + smooth_sigma : float, default : 4.5 Sigma for the Gaussian kernel (in number of bins) """ diff --git a/src/spikeinterface/widgets/unit_probe_map.py b/src/spikeinterface/widgets/unit_probe_map.py index 24b74ec954..3df9354840 100644 --- a/src/spikeinterface/widgets/unit_probe_map.py +++ b/src/spikeinterface/widgets/unit_probe_map.py @@ -20,14 +20,14 @@ class UnitProbeMapWidget(BaseWidget): Parameters ---------- - sorting_analyzer: SortingAnalyzer - unit_ids: list + sorting_analyzer : SortingAnalyzer + unit_ids : list List of unit ids. - channel_ids: list + channel_ids : list The channel ids to display - animated: bool, default: False + animated : bool, default : False Animation for amplitude on time - with_channel_ids: bool, default: False + with_channel_ids : bool, default : False add channel ids text on the probe """ diff --git a/src/spikeinterface/widgets/unit_summary.py b/src/spikeinterface/widgets/unit_summary.py index 0b2a348edf..d0457e52fa 100644 --- a/src/spikeinterface/widgets/unit_summary.py +++ b/src/spikeinterface/widgets/unit_summary.py @@ -25,9 +25,9 @@ class UnitSummaryWidget(BaseWidget): The SortingAnalyzer object unit_id : int or str The unit id to plot the summary of - unit_colors : dict or None, default: None + unit_colors : dict or None, default : None If given, a dictionary with unit ids as keys and colors as values, - sparsity : ChannelSparsity or None, default: None + sparsity : ChannelSparsity or None, default : None Optional ChannelSparsity to apply. If SortingAnalyzer is already sparse, the argument is ignored """ diff --git a/src/spikeinterface/widgets/unit_waveforms.py b/src/spikeinterface/widgets/unit_waveforms.py index f6e16abaae..4b41b51398 100644 --- a/src/spikeinterface/widgets/unit_waveforms.py +++ b/src/spikeinterface/widgets/unit_waveforms.py @@ -19,64 +19,64 @@ class UnitWaveformsWidget(BaseWidget): sorting_analyzer_or_templates : SortingAnalyzer | Templates The SortingAnalyzer or Templates object. 
If Templates is given, the "plot_waveforms" argument is set to False - channel_ids: list or None, default: None + channel_ids : list or None, default : None The channel ids to display - unit_ids : list or None, default: None + unit_ids : list or None, default : None List of unit ids - plot_templates : bool, default: True + plot_templates : bool, default : True If True, templates are plotted over the waveforms - sparsity : ChannelSparsity or None, default: None + sparsity : ChannelSparsity or None, default : None Optional ChannelSparsity to apply If SortingAnalyzer is already sparse, the argument is ignored - set_title : bool, default: True + set_title : bool, default : True Create a plot title with the unit number if True - plot_channels : bool, default: False + plot_channels : bool, default : False Plot channel locations below traces - unit_selected_waveforms : None or dict, default: None + unit_selected_waveforms : None or dict, default : None A dict key is unit_id and value is the subset of waveforms indices that should be displayed (matplotlib backend) - max_spikes_per_unit : int or None, default: 50 + max_spikes_per_unit : int or None, default : 50 If given and unit_selected_waveforms is None, only max_spikes_per_unit random waveforms are displayed per unit (matplotlib backend) - scale : float, default: 1 + scale : float, default : 1 Scale factor for the waveforms/templates (matplotlib backend) - widen_narrow_scale : float, default: 1 + widen_narrow_scale : float, default : 1 Scale factor for the x-axis of the waveforms/templates (matplotlib backend) - axis_equal : bool, default: False + axis_equal : bool, default : False Equal aspect ratio for x and y axis, to visualize the array geometry to scale - lw_waveforms : float, default: 1 + lw_waveforms : float, default : 1 Line width for the waveforms, (matplotlib backend) - lw_templates : float, default: 2 + lw_templates : float, default : 2 Line width for the templates, (matplotlib backend) - unit_colors : None or dict, default: None + unit_colors : None or dict, default : None A dict key is unit_id and value is any color format handled by matplotlib. If None, then the get_unit_colors() is internally used. (matplotlib / ipywidgets backend) - alpha_waveforms : float, default: 0.5 + alpha_waveforms : float, default : 0.5 Alpha value for waveforms (matplotlib backend) - alpha_templates : float, default: 1 + alpha_templates : float, default : 1 Alpha value for templates, (matplotlib backend) - shade_templates : bool, default: True + shade_templates : bool, default : True If True, templates are shaded, see templates_percentile_shading argument - templates_percentile_shading : float, tuple/list of floats, or None, default: (1, 25, 75, 99) + templates_percentile_shading : float, tuple/list of floats, or None, default : (1, 25, 75, 99) It controls the shading of the templates. If None, the shading is +/- the standard deviation of the templates. If float, it controls the percentile of the template values used to shade the templates. - Note that it is one-sided: if 5 is given, the 5th and 95th percentiles are used to shade + Note that it is one-sided: if 5 is given, the 5th and 95th percentiles are used to shade the templates. If list of floats, it needs to have an even number of elements which control the lower and upper percentile used to shade the templates. The first half of the elements are used for the lower bounds, and the second half for the upper bounds. Inner elements produce darker shadings.
For sortingview backend only 2 or 4 elements are supported. - scalebar : bool, default: False + scalebar : bool, default : False Display a scale bar on the waveforms plot (matplotlib backend) - hide_unit_selector : bool, default: False + hide_unit_selector : bool, default : False For sortingview backend, if True the unit selector is not displayed - same_axis : bool, default: False + same_axis : bool, default : False If True, waveforms and templates are displayed on the same axis (matplotlib backend) - x_offset_units : bool, default: False + x_offset_units : bool, default : False In case same_axis is True, this parameter allows x-offsetting the waveforms for different units (recommended for a few units) (matplotlib backend) - plot_legend : bool, default: True + plot_legend : bool, default : True Display legend (matplotlib backend) """ diff --git a/src/spikeinterface/widgets/unit_waveforms_density_map.py b/src/spikeinterface/widgets/unit_waveforms_density_map.py index 6ef1a7a782..9ff10331c7 100644 --- a/src/spikeinterface/widgets/unit_waveforms_density_map.py +++ b/src/spikeinterface/widgets/unit_waveforms_density_map.py @@ -16,21 +16,21 @@ class UnitWaveformDensityMapWidget(BaseWidget): ---------- sorting_analyzer : SortingAnalyzer The SortingAnalyzer for calculating waveforms - channel_ids : list or None, default: None + channel_ids : list or None, default : None The channel ids to display - unit_ids : list or None, default: None + unit_ids : list or None, default : None List of unit ids - sparsity : ChannelSparsity or None, default: None + sparsity : ChannelSparsity or None, default : None Optional ChannelSparsity to apply If SortingAnalyzer is already sparse, the argument is ignored - use_max_channel : bool, default: False + use_max_channel : bool, default : False Use only the max channel - peak_sign : "neg" | "pos" | "both", default: "neg" + peak_sign : "neg" | "pos" | "both", default : "neg" Used to detect max channel only when use_max_channel=True - unit_colors : None or dict, default: None + unit_colors : None or dict, default : None A dict key is unit_id and value is any color format handled by matplotlib.
If None, get_unit_colors() is used internally - same_axis : bool, default: False + same_axis : bool, default : False If True, then all densities are plotted on the same axis and the channels shown are the union of all channels across units """ From fa261ea2fc2ed0a762a879a1b44285524c800e9e Mon Sep 17 00:00:00 2001 From: chrishalcrow <57948917+chrishalcrow@users.noreply.github.com> Date: Fri, 31 May 2024 14:47:19 +0100 Subject: [PATCH 2/5] Change "default :" to "default:" --- .../comparison/multicomparisons.py | 22 +++---- .../comparison/paircomparisons.py | 64 +++++++++---------- src/spikeinterface/core/baserecording.py | 48 +++++++------- src/spikeinterface/core/basesnippets.py | 6 +- src/spikeinterface/core/basesorting.py | 20 +++--- .../core/binaryrecordingextractor.py | 16 ++--- src/spikeinterface/core/job_tools.py | 22 +++---- src/spikeinterface/core/numpyextractors.py | 10 +-- src/spikeinterface/core/recording_tools.py | 58 ++++++++--------- src/spikeinterface/core/sortinganalyzer.py | 24 +++---- src/spikeinterface/core/sparsity.py | 14 ++-- src/spikeinterface/core/template_tools.py | 10 +-- src/spikeinterface/core/zarrextractors.py | 16 ++--- src/spikeinterface/curation/auto_merge.py | 40 ++++++------ .../curation/curationsorting.py | 4 +- .../curation/mergeunitssorting.py | 2 +- .../curation/remove_duplicated_spikes.py | 2 +- .../curation/remove_redundant.py | 20 +++--- .../curation/splitunitsorting.py | 2 +- src/spikeinterface/exporters/report.py | 10 +-- src/spikeinterface/exporters/to_phy.py | 20 +++--- src/spikeinterface/extractors/cbin_ibl.py | 4 +- .../extractors/herdingspikesextractors.py | 2 +- .../extractors/klustaextractors.py | 2 +- .../extractors/mdaextractors.py | 14 ++-- .../extractors/neoextractors/alphaomega.py | 8 +-- .../extractors/neoextractors/axona.py | 2 +- .../extractors/neoextractors/biocam.py | 10 +-- .../extractors/neoextractors/blackrock.py | 12 ++-- .../extractors/neoextractors/ced.py | 8 +-- .../extractors/neoextractors/intan.py | 6 +- .../extractors/neoextractors/maxwell.py | 10 +-- .../extractors/neoextractors/mcsraw.py | 8 +-- .../extractors/neoextractors/mearec.py | 2 +- .../extractors/neoextractors/neuralynx.py | 14 ++-- .../extractors/neoextractors/neuroexplorer.py | 6 +- .../extractors/neoextractors/neuroscope.py | 18 +++--- .../extractors/neoextractors/nix.py | 8 +-- .../extractors/neoextractors/openephys.py | 40 ++++++------ .../extractors/neoextractors/plexon.py | 6 +- .../extractors/neoextractors/plexon2.py | 8 +-- .../extractors/neoextractors/spike2.py | 6 +- .../extractors/neoextractors/spikegadgets.py | 6 +- .../extractors/neoextractors/spikeglx.py | 8 +-- .../extractors/neoextractors/tdt.py | 6 +- .../extractors/nwbextractors.py | 56 ++++++++-------- .../extractors/phykilosortextractors.py | 16 ++--- src/spikeinterface/extractors/toy_example.py | 18 +++--- src/spikeinterface/generation/drift_tools.py | 14 ++-- src/spikeinterface/preprocessing/clip.py | 18 +++--- .../preprocessing/common_reference.py | 12 ++-- .../preprocessing/correct_lsb.py | 8 +-- .../preprocessing/depth_order.py | 4 +- .../preprocessing/detect_bad_channels.py | 48 +++++++------- src/spikeinterface/preprocessing/filter.py | 12 ++-- .../preprocessing/filter_gaussian.py | 2 +- .../preprocessing/interpolate_bad_channels.py | 6 +- src/spikeinterface/preprocessing/motion.py | 6 +- .../preprocessing/normalize_scale.py | 20 +++--- .../preprocessing/phase_shift.py | 4 +- .../preprocessing/remove_artifacts.py | 16 ++--- src/spikeinterface/preprocessing/resample.py | 6 +-
.../preprocessing/silence_periods.py | 2 +- .../preprocessing/unsigned_to_signed.py | 2 +- src/spikeinterface/preprocessing/whiten.py | 20 +++--- src/spikeinterface/sorters/launcher.py | 8 +-- src/spikeinterface/sorters/runsorter.py | 64 +++++++++---------- src/spikeinterface/widgets/amplitudes.py | 14 ++-- src/spikeinterface/widgets/comparison.py | 6 +- .../widgets/crosscorrelograms.py | 12 ++-- src/spikeinterface/widgets/gtstudy.py | 6 +- src/spikeinterface/widgets/motion.py | 18 +++--- src/spikeinterface/widgets/multicomparison.py | 24 +++---- src/spikeinterface/widgets/peak_activity.py | 10 +-- src/spikeinterface/widgets/quality_metrics.py | 10 +-- src/spikeinterface/widgets/sorting_summary.py | 14 ++-- src/spikeinterface/widgets/spike_locations.py | 18 +++--- .../widgets/spikes_on_traces.py | 34 +++++----- .../widgets/template_metrics.py | 10 +-- .../widgets/template_similarity.py | 12 ++-- src/spikeinterface/widgets/traces.py | 34 +++++----- src/spikeinterface/widgets/unit_depths.py | 6 +- src/spikeinterface/widgets/unit_locations.py | 14 ++-- src/spikeinterface/widgets/unit_presence.py | 6 +- src/spikeinterface/widgets/unit_probe_map.py | 4 +- src/spikeinterface/widgets/unit_summary.py | 4 +- src/spikeinterface/widgets/unit_waveforms.py | 46 ++++++------- .../widgets/unit_waveforms_density_map.py | 14 ++-- 88 files changed, 661 insertions(+), 661 deletions(-) diff --git a/src/spikeinterface/comparison/multicomparisons.py b/src/spikeinterface/comparison/multicomparisons.py index f6db1ab7a5..7cde985b37 100644 --- a/src/spikeinterface/comparison/multicomparisons.py +++ b/src/spikeinterface/comparison/multicomparisons.py @@ -27,22 +27,22 @@ class MultiSortingComparison(BaseMultiComparison, MixinSpikeTrainComparison): ---------- sorting_list : list List of sorting extractor objects to be compared - name_list : list, default : None + name_list : list, default: None List of spike sorter names. If not given, sorters are named as "sorter0", "sorter1", "sorter2", etc. - delta_time : float, default : 0.4 + delta_time : float, default: 0.4 Number of ms to consider coincident spikes - match_score : float, default : 0.5 + match_score : float, default: 0.5 Minimum agreement score to match units - chance_score : float, default : 0.1 + chance_score : float, default: 0.1 Minimum agreement score for a possible match - n_jobs : int, default : -1 + n_jobs : int, default: -1 Number of cores to use in parallel. Uses all available if -1 - spiketrain_mode : "union" | "intersection", default : "union" + spiketrain_mode : "union" | "intersection", default: "union" Mode to extract agreement spike trains: - "union" : spike trains are the union between the spike trains of the best matching two sorters - "intersection" : spike trains are the intersection between the spike trains of the best matching two sorters - verbose : bool, default : False + verbose : bool, default: False If True, output is verbose Returns @@ -311,13 +311,13 @@ class MultiTemplateComparison(BaseMultiComparison, MixinTemplateComparison): ---------- waveform_list : list List of waveform extractor objects to be compared - name_list : list, default : None + name_list : list, default: None List of session names. If not given, sorters are named as "sess0", "sess1", "sess2", etc.
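A minimal sketch of the multi-sorting comparison described above (the three sorting objects are assumed to exist; `compare_multiple_sorters` is the usual entry point for MultiSortingComparison):

    from spikeinterface.comparison import compare_multiple_sorters

    msc = compare_multiple_sorters(
        sorting_list=[sorting_ks, sorting_sc, sorting_tdc],  # hypothetical sortings
        name_list=["KS", "SC", "TDC"],
        delta_time=0.4,           # ms window for coincident spikes
        spiketrain_mode="union",  # agreement spike trains from the best matching pair
    )
    # keep units agreed upon by at least 2 sorters
    agreement_sorting = msc.get_agreement_sorting(minimum_agreement_count=2)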
- match_score : float, default : 0.8 + match_score : float, default: 0.8 Minimum agreement score to match units - chance_score : float, default : 0.3 + chance_score : float, default: 0.3 Minimum agreement score for a possible match - verbose : bool, default : False + verbose : bool, default: False If True, output is verbose Returns diff --git a/src/spikeinterface/comparison/paircomparisons.py b/src/spikeinterface/comparison/paircomparisons.py index fac711cda0..865451705c 100644 --- a/src/spikeinterface/comparison/paircomparisons.py +++ b/src/spikeinterface/comparison/paircomparisons.py @@ -116,19 +116,19 @@ class SymmetricSortingComparison(BasePairSorterComparison): The first sorting for the comparison sorting2 : SortingExtractor The second sorting for the comparison - sorting1_name : str, default : None + sorting1_name : str, default: None The name of sorter 1 - sorting2_name : : str, default : None + sorting2_name : str, default: None The name of sorter 2 - delta_time : float, default : 0.4 + delta_time : float, default: 0.4 Number of ms to consider coincident spikes - match_score : float, default : 0.5 + match_score : float, default: 0.5 Minimum agreement score to match units - chance_score : float, default : 0.1 + chance_score : float, default: 0.1 Minimum agreement score for a possible match - n_jobs : int, default : -1 + n_jobs : int, default: -1 Number of cores to use in parallel. Uses all available if -1 - verbose : bool, default : False + verbose : bool, default: False If True, output is verbose Returns @@ -219,35 +219,35 @@ class GroundTruthComparison(BasePairSorterComparison): The first sorting for the comparison tested_sorting : SortingExtractor The second sorting for the comparison - gt_name : str, default : None + gt_name : str, default: None The name of sorter 1 - tested_name : : str, default : None + tested_name : str, default: None The name of sorter 2 - delta_time : float, default : 0.4 + delta_time : float, default: 0.4 Number of ms to consider coincident spikes - match_score : float, default : 0.5 + match_score : float, default: 0.5 Minimum agreement score to match units - chance_score : float, default : 0.1 + chance_score : float, default: 0.1 Minimum agreement score for a possible match - redundant_score : float, default : 0.2 + redundant_score : float, default: 0.2 Agreement score above which units are redundant - overmerged_score : float, default : 0.2 + overmerged_score : float, default: 0.2 Agreement score above which units can be overmerged - well_detected_score : float, default : 0.8 + well_detected_score : float, default: 0.8 Agreement score above which units are well detected - exhaustive_gt : bool, default : False + exhaustive_gt : bool, default: False Tells if the ground truth is "exhaustive" or not. In other words, if the GT has all possible units, it allows more performance measurements. For instance, MEArec simulated datasets have exhaustive_gt=True - match_mode : "hungarian" | "best", default : "hungarian" + match_mode : "hungarian" | "best", default: "hungarian" The method to match units - n_jobs : int, default : -1 + n_jobs : int, default: -1 Number of cores to use in parallel.
Uses all available if -1 - compute_labels : bool, default : False + compute_labels : bool, default: False If True, labels are computed at instantiation - compute_misclassifications : bool, default : False + compute_misclassifications : bool, default: False If True, misclassifications are computed at instantiation - verbose : bool, default : False + verbose : bool, default: False If True, output is verbose Returns @@ -392,9 +392,9 @@ def get_performance(self, method="by_unit", output="pandas"): Parameters ---------- - method : "by_unit" | "pooled_with_average", default : "by_unit" + method : "by_unit" | "pooled_with_average", default: "by_unit" The method to compute performance - output : "pandas" | "dict", default : "pandas" + output : "pandas" | "dict", default: "pandas" The output format Returns @@ -478,7 +478,7 @@ def get_well_detected_units(self, well_detected_score=None): Parameters ---------- - well_detected_score : float, default : None + well_detected_score : float, default: None The agreement score above which tested units are counted as "well detected". """ @@ -514,7 +514,7 @@ def get_false_positive_units(self, redundant_score=None): Parameters ---------- - redundant_score : float, default : None + redundant_score : float, default: None The agreement score below which tested units are counted as "false positive" (and not "redundant"). """ @@ -554,7 +554,7 @@ def get_redundant_units(self, redundant_score=None): Parameters ---------- - redundant_score=None : float, default : None + redundant_score : float, default: None The agreement score above which tested units are counted as "redundant" (and not "false positive"). """ @@ -589,7 +589,7 @@ def get_overmerged_units(self, overmerged_score=None): Parameters ---------- - overmerged_score : float, default : None + overmerged_score : float, default: None Tested units with 2 or more agreement scores above "overmerged_score" are counted as "overmerged". """ @@ -700,15 +700,15 @@ class TemplateComparison(BasePairComparison, MixinTemplateComparison): The first SortingAnalyzer to get templates to compare sorting_analyzer_2 : SortingAnalyzer The second SortingAnalyzer to get templates to compare - unit_ids1 : list, default : None + unit_ids1 : list, default: None List of units from sorting_analyzer_1 to compare - unit_ids2 : list, default : None + unit_ids2 : list, default: None List of units from sorting_analyzer_2 to compare - similarity_method : str, default : "cosine_similarity" + similarity_method : str, default: "cosine_similarity" Method for the similarity matrix - sparsity_dict : dict, default : None + sparsity_dict : dict, default: None Dictionary for sparsity - verbose : bool, default : False + verbose : bool, default: False If True, output is verbose Returns diff --git a/src/spikeinterface/core/baserecording.py b/src/spikeinterface/core/baserecording.py index 78a8e8c84a..939caa360e 100644 --- a/src/spikeinterface/core/baserecording.py +++ b/src/spikeinterface/core/baserecording.py @@ -188,9 +188,9 @@ def get_num_samples(self, segment_index=None) -> int: Parameters ---------- - segment_index : int or None, default : None + segment_index : int or None, default: None The segment index to retrieve the number of samples for.
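A quick sketch of the ground-truth comparison workflow documented above (assuming `gt_sorting` and `tested_sorting` exist; `compare_sorter_to_ground_truth` instantiates GroundTruthComparison):

    from spikeinterface.comparison import compare_sorter_to_ground_truth

    cmp = compare_sorter_to_ground_truth(gt_sorting, tested_sorting, exhaustive_gt=True)
    perf = cmp.get_performance(method="pooled_with_average", output="pandas")
    well_detected = cmp.get_well_detected_units(well_detected_score=0.8)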
- For multi-segment objects, it is required, default : None + For multi-segment objects, it is required, default: None With single segment recording returns the number of samples in the segment Returns @@ -223,9 +223,9 @@ def get_duration(self, segment_index=None) -> float: Parameters ---------- - segment_index : int or None, default : None + segment_index : int or None, default: None The segment index to retrieve the duration for. - For multi-segment objects, it is required, default : None + For multi-segment objects, it is required, default: None With single segment recording returns the duration of the single segment Returns @@ -256,9 +256,9 @@ def get_memory_size(self, segment_index=None) -> int: Parameters ---------- - segment_index : int or None, default : None + segment_index : int or None, default: None The index of the segment for which the memory size should be calculated. - For multi-segment objects, it is required, default : None + For multi-segment objects, it is required, default: None With single segment recording returns the memory size of the single segment Returns @@ -301,20 +301,20 @@ def get_traces( Parameters ---------- - segment_index : int | None, default : None - The segment index to get traces from. If recording is multi-segment, it is required, default : None - start_frame : int | None, default : None - The start frame. If None, 0 is used, default : None - end_frame : int | None, default : None - The end frame. If None, the number of samples in the segment is used, default : None - channel_ids : list | np.array | tuple | None, default : None - The channel ids. If None, all channels are used, default : None - order : "C" | "F" | None, default : None + segment_index : int | None, default: None + The segment index to get traces from. If recording is multi-segment, it is required, default: None + start_frame : int | None, default: None + The start frame. If None, 0 is used, default: None + end_frame : int | None, default: None + The end frame. If None, the number of samples in the segment is used, default: None + channel_ids : list | np.array | tuple | None, default: None + The channel ids. If None, all channels are used, default: None + order : "C" | "F" | None, default: None The order of the traces ("C" | "F").
If None, traces are returned as they are - return_scaled : bool, default : False + return_scaled : bool, default: False If True and the recording has scaling (gain_to_uV and offset_to_uV properties), traces are scaled to uV - cast_unsigned : bool, default : False + cast_unsigned : bool, default: False If True and the traces are unsigned, they are cast to integer and centered (an offset of (2**nbits) is subtracted) @@ -424,7 +424,7 @@ def get_times(self, segment_index=None): Parameters ---------- - segment_index : int or None, default : None + segment_index : int or None, default: None The segment index (required for multi-segment) Returns @@ -442,7 +442,7 @@ def has_time_vector(self, segment_index=None): Parameters ---------- - segment_index : int or None, default : None + segment_index : int or None, default: None The segment index (required for multi-segment) Returns @@ -462,9 +462,9 @@ def set_times(self, times, segment_index=None, with_warning=True): ---------- times : 1d np.array The time vector - segment_index : int or None, default : None + segment_index : int or None, default: None The segment index (required for multi-segment) - with_warning : bool, default : True + with_warning : bool, default: True If True, a warning is printed """ segment_index = self._check_segment_index(segment_index) @@ -830,11 +830,11 @@ def get_traces( Parameters ---------- - start_frame : int | None, default : None + start_frame : int | None, default: None start sample index, or zero if None - end_frame : int | None, default : None + end_frame : int | None, default: None end_sample, or number of samples if None - channel_indices : list | np.array | tuple | None, default : None + channel_indices : list | np.array | tuple | None, default: None Indices of channels to return, or all channels if None Returns diff --git a/src/spikeinterface/core/basesnippets.py b/src/spikeinterface/core/basesnippets.py index 81df759a31..5443234910 100644 --- a/src/spikeinterface/core/basesnippets.py +++ b/src/spikeinterface/core/basesnippets.py @@ -237,7 +237,7 @@ def get_snippets( ---------- indices : list[int] Indices of the snippets to return - channel_indices : Union[list, None], default : None + channel_indices : Union[list, None], default: None Indices of channels to return, or all channels if None Returns @@ -269,9 +269,9 @@ def frames_to_indices(self, start_frame: Union[int, None] = None, end_frame: Uni Parameters ---------- - start_frame : Union[int, None], default : None + start_frame : Union[int, None], default: None start sample index, or zero if None - end_frame : Union[int, None], default : None + end_frame : Union[int, None], default: None end_sample, or number of samples if None Returns diff --git a/src/spikeinterface/core/basesorting.py b/src/spikeinterface/core/basesorting.py index 85f4f6fb5c..7214d2780e 100644 --- a/src/spikeinterface/core/basesorting.py +++ b/src/spikeinterface/core/basesorting.py @@ -94,7 +94,7 @@ def get_num_samples(self, segment_index=None) -> int: Parameters ---------- - segment_index : int or None, default : None + segment_index : int or None, default: None The segment index to retrieve the number of samples for. For multi-segment objects, it is required @@ -187,7 +187,7 @@ def register_recording(self, recording, check_spike_frames=True): recording : BaseRecording Recording with the same number of segments as current sorting. Assigned to self._recording. 
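As a usage sketch for `get_traces` with the arguments documented above (the `recording` object is assumed):

    # read one second of scaled traces (uV) from the first segment
    fs = recording.get_sampling_frequency()
    traces = recording.get_traces(
        segment_index=0,
        start_frame=0,
        end_frame=int(fs),
        return_scaled=True,
    )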
- check_spike_frames : bool, default : True + check_spike_frames : bool, default: True If True, assert for each segment that all spikes are within the recording's range. """ assert np.isclose( @@ -320,7 +320,7 @@ def count_num_spikes_per_unit(self, outputs="dict"): Parameters ---------- - outputs : "dict" | "array", default : "dict" + outputs : "dict" | "array", default: "dict" Control the type of the returned object : a dict (keys are unit_ids) or a numpy array. Returns @@ -388,7 +388,7 @@ def select_units(self, unit_ids, renamed_unit_ids=None) -> BaseSorting: ---------- unit_ids : numpy.array or list List of unit ids to keep - renamed_unit_ids : numpy.array or list, default : None + renamed_unit_ids : numpy.array or list, default: None If given, the kept unit ids are renamed Returns @@ -519,7 +519,7 @@ def precompute_spike_trains(self, from_spike_vector=None): Parameters ---------- - from_spike_vector : None | bool, default : None + from_spike_vector : None | bool, default: None If None, then it is automatic depending on whether the spike vector is cached. If True, will compute it from the spike vector. If False, will call `get_unit_spike_train` for each segment for each unit. @@ -560,14 +560,14 @@ def to_spike_vector( Parameters ---------- - concatenated : bool, default : True + concatenated : bool, default: True With concatenated=True the output is one numpy "spike vector" with spikes from all segments. With concatenated=False the output is a list "spike vector" by segment. - extremum_channel_inds : None or dict, default : None + extremum_channel_inds : None or dict, default: None If a dictionary of unit_id to channel_ind is given, then an extra field "channel_index" is added. This can be convenient for computing spike positions after sorting. This dict can be computed with `get_template_extremum_channel(we, outputs="index")` - use_cache : bool, default : True + use_cache : bool, default: True When True the spikes vector is cached as an attribute of the object (`_cached_spike_vector`). This caching only occurs when extremum_channel_inds=None. @@ -727,8 +727,8 @@ def get_unit_spike_train( Parameters ---------- unit_id - start_frame : int, default : None - end_frame : int, default : None + start_frame : int, default: None + end_frame : int, default: None Returns ------- diff --git a/src/spikeinterface/core/binaryrecordingextractor.py b/src/spikeinterface/core/binaryrecordingextractor.py index f2e4762c2c..5d72532704 100644 --- a/src/spikeinterface/core/binaryrecordingextractor.py +++ b/src/spikeinterface/core/binaryrecordingextractor.py @@ -27,19 +27,19 @@ class BinaryRecordingExtractor(BaseRecording): Number of channels dtype : str or dtype The dtype of the binary file
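A sketch of `to_spike_vector` with the `extremum_channel_inds` option described above (assuming `sorting` and a `sorting_analyzer` with the "templates" extension):

    from spikeinterface.core import get_template_extremum_channel

    extremum_inds = get_template_extremum_channel(sorting_analyzer, outputs="index")
    spikes = sorting.to_spike_vector(extremum_channel_inds=extremum_inds)
    # structured array with "sample_index", "unit_index" and "segment_index" fields,
    # plus "channel_index" because extremum_channel_inds was given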
- gain_to_uV : float or array-like, default : None + gain_to_uV : float or array-like, default: None The gain to apply to the traces - offset_to_uV : float or array-like, default : None + offset_to_uV : float or array-like, default: None The offset to apply to the traces - is_filtered : bool or None, default : None + is_filtered : bool or None, default: None If True, the recording is assumed to be filtered. If None, is_filtered is not set. Notes @@ -141,7 +141,7 @@ def write_recording(recording, file_paths, dtype=None, **job_kwargs): The recording extractor object to be saved in .dat format file_paths : str The path to the file. - dtype : dtype, default : None + dtype : dtype, default: None Type of the saved data {} """ diff --git a/src/spikeinterface/core/job_tools.py b/src/spikeinterface/core/job_tools.py index 3901d0422e..3849c2b6e7 100644 --- a/src/spikeinterface/core/job_tools.py +++ b/src/spikeinterface/core/job_tools.py @@ -32,7 +32,7 @@ Using a float between 0 and 1 will use that fraction of the total cores. * progress_bar : bool If True, a progress bar is printed - * mp_context : "fork" | "spawn" | None, default : None + * mp_context : "fork" | "spawn" | None, default: None Context for multiprocessing. It can be None, "fork" or "spawn". Note that "fork" is only safely available on LINUX systems """ @@ -282,31 +282,31 @@ class ChunkRecordingExecutor: Arguments for init_func verbose : bool If True, output is verbose - job_name : str, default : "" + job_name : str, default: "" Job name - handle_returns : bool, default : False + handle_returns : bool, default: False If True, the function can return values - gather_func : None or callable, default : None + gather_func : None or callable, default: None Optional function that is called in the main thread and retrieves the results of each worker. This function can be used instead of `handle_returns` to implement custom storage on-the-fly. - n_jobs : int, default : 1 + n_jobs : int, default: 1 Number of jobs to be used. Use -1 to use as many jobs as the number of cores - total_memory : str, default : None + total_memory : str, default: None Total memory (RAM) to use (e.g. "1G", "500M") - chunk_memory : str, default : None + chunk_memory : str, default: None Memory per chunk (RAM) to use (e.g. "1G", "500M") - chunk_size : int or None, default : None + chunk_size : int or None, default: None Size of each chunk in number of samples. If "total_memory" or "chunk_memory" are used, it is ignored. chunk_duration : str or float or None Chunk duration in s if float or with units if str (e.g. "1s", "500ms") - mp_context : "fork" | "spawn" | None, default : None + mp_context : "fork" | "spawn" | None, default: None "fork" or "spawn". If None, the context is taken by the recording.get_preferred_mp_context(). "fork" is only safely available on LINUX systems. - max_threads_per_process : int or None, default : None + max_threads_per_process : int or None, default: None Limit the number of threads per process using the threadpoolctl module. This is used only when n_jobs>1. If None, no limit is applied.
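The job_kwargs documented above are typically bundled in a dict and forwarded to any chunk-based operation; a sketch (the `recording` object and output folder are assumptions):

    job_kwargs = dict(
        n_jobs=8,             # -1 for all cores, or a float fraction of the cores
        chunk_duration="1s",  # alternatively chunk_size, chunk_memory or total_memory
        progress_bar=True,
        mp_context=None,      # "fork" (Linux only) or "spawn"
    )
    recording.save(folder="preprocessed_recording", **job_kwargs)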
- progress_bar : bool, default : False + progress_bar : bool, default: False If True, a progress bar is printed to monitor the progress of the process diff --git a/src/spikeinterface/core/numpyextractors.py b/src/spikeinterface/core/numpyextractors.py index 06e6cf75c8..62cd2fe2cf 100644 --- a/src/spikeinterface/core/numpyextractors.py +++ b/src/spikeinterface/core/numpyextractors.py @@ -139,7 +139,7 @@ class SharedMemoryRecording(BaseRecording): Times in seconds of the first sample for each segment channel_ids : list An optional list of channel_ids. If None, linear channels are assumed - main_shm_owner : bool, default : True + main_shm_owner : bool, default: True If True, the main instance will unlink the sharedmem buffer when deleted """ @@ -306,7 +306,7 @@ def from_times_labels(times_list, labels_list, sampling_frequency, unit_ids=None An array of spike times (in frames) labels_list : list of array (or array) An array of spike labels corresponding to the given times - unit_ids : list or None, default : None + unit_ids : list or None, default: None The explicit list of unit_ids that should be extracted from labels_list If None, then it will be np.unique(labels_list) """ @@ -668,7 +668,7 @@ def get_snippets( ---------- indices : list[int] Indices of the snippets to return - channel_indices : Union[list, None], default : None + channel_indices : Union[list, None], default: None Indices of channels to return, or all channels if None Returns @@ -689,9 +689,9 @@ def frames_to_indices(self, start_frame: Union[int, None] = None, end_frame: Uni Parameters ---------- - start_frame : Union[int, None], default : None + start_frame : Union[int, None], default: None start sample index, or zero if None - end_frame : Union[int, None], default : None + end_frame : Union[int, None], default: None end_sample, or number of samples if None Returns ------- diff --git a/src/spikeinterface/core/recording_tools.py b/src/spikeinterface/core/recording_tools.py index 3f7fda694f..c32baf9d59 100644 --- a/src/spikeinterface/core/recording_tools.py +++ b/src/spikeinterface/core/recording_tools.py @@ -33,10 +33,10 @@ def read_binary_recording(file, num_channels, dtype, time_axis=0, offset=0): Number of channels dtype : dtype dtype of the file - time_axis : 0 or 1, default : 0 + time_axis : 0 or 1, default: 0 If 0 then traces are transposed to ensure (nb_sample, nb_channel) in the file. If 1, the traces shape (nb_channel, nb_sample) is kept in the file. - offset : int, default : 0 + offset : int, default: 0 number of offset bytes """ @@ -89,14 +89,14 @@ def write_binary_recording( The recording extractor object to be saved in .dat format file_path : str or list[str] The path to the file. - dtype : dtype or None, default : None + dtype : dtype or None, default: None Type of the saved data - add_file_extension, bool, default : True + add_file_extension : bool, default: True If True, and the file path does not end in "raw", "bin", or "dat" then "raw" is added as an extension. - byte_offset : int, default : 0 + byte_offset : int, default: 0 Offset in bytes for the binary file (e.g. to write a header). This is useful in case you want to append data to an existing file where you wrote a header or other data before. - auto_cast_uint : bool, default : True + auto_cast_uint : bool, default: True If True, unsigned integers are automatically cast to int if the specified dtype is signed .. deprecated:: 0.103, use the `unsigned_to_signed` function instead.
verbose : bool @@ -294,11 +294,11 @@ def write_memory_recording(recording, dtype=None, verbose=False, auto_cast_uint= ---------- recording : RecordingExtractor The recording extractor object to be saved in .dat format - dtype : dtype, default : None + dtype : dtype, default: None Type of the saved data - verbose : bool, default : False + verbose : bool, default: False If True, output is verbose (when chunks are used) - auto_cast_uint : bool, default : True + auto_cast_uint : bool, default: True If True, unsigned integers are automatically cast to int if the specified dtype is signed buffer_type : "auto" | "numpy" | "sharedmem" {} @@ -388,28 +388,28 @@ def write_to_h5_dataset_format( Path to dataset in the h5 file (e.g. "/dataset") segment_index : int index of segment - save_path : str, default : None + save_path : str, default: None The path to the file. - file_handle : file handle, default : None + file_handle : file handle, default: None The file handle to dump data. This can be used to append data to a header. In case file_handle is given, the file is NOT closed after writing the binary data. - time_axis : 0 or 1, default : 0 + time_axis : 0 or 1, default: 0 If 0 then traces are transposed to ensure (nb_sample, nb_channel) in the file. If 1, the traces shape (nb_channel, nb_sample) is kept in the file. - single_axis : bool, default : False + single_axis : bool, default: False If True, a single-channel recording is saved as a one dimensional array - dtype : dtype, default : None + dtype : dtype, default: None Type of the saved data - chunk_size : None or int, default : None + chunk_size : None or int, default: None Number of chunks to save the file in. This avoids too much memory consumption for big files. If None and "chunk_memory" is given, the file is saved in chunks of "chunk_memory" MB - chunk_memory : None or str, default : "500M" + chunk_memory : None or str, default: "500M" Chunk size in bytes must end with "k", "M" or "G" - verbose : bool, default : False + verbose : bool, default: False If True, output is verbose (when chunks are used) - auto_cast_uint : bool, default : True + auto_cast_uint : bool, default: True If True, unsigned integers are automatically cast to int if the specified dtype is signed - return_scaled : bool, default : False + return_scaled : bool, default: False If True and the recording has scaling (gain_to_uV and offset_to_uV properties), traces are dumped to uV """ @@ -527,17 +527,17 @@ def get_random_data_chunks( ---------- recording : BaseRecording The recording to get random chunks from - return_scaled : bool, default : False + return_scaled : bool, default: False If True, returned chunks are scaled to uV - num_chunks_per_segment : int, default : 20 + num_chunks_per_segment : int, default: 20 Number of chunks per segment - chunk_size : int, default : 10000 + chunk_size : int, default: 10000 Size of a chunk in number of frames - concatenated : bool, default : True + concatenated : bool, default: True If True, chunks are concatenated along the time axis - seed : int, default : 0 + seed : int, default: 0 Random seed - margin_frames : int, default : 0 + margin_frames : int, default: 0 Margin in number of frames to avoid edge effects Returns @@ -606,7 +606,7 @@ def get_closest_channels(recording, channel_ids=None, num_channels=None): The recording extractor to get closest channels channel_ids : list List of channel ids to compute their near neighborhood - num_channels : int, default : None + num_channels : int, default: None Maximum number of neighborhood channels
to return Returns @@ -655,7 +655,7 @@ def get_noise_levels( The recording extractor to get noise levels return_scaled : bool If True, returned noise levels are scaled to uV - method : "mad" | "std", default : "mad" + method : "mad" | "std", default: "mad" The method to use to estimate noise levels force_recompute : bool If True, noise levels are recomputed even if they are already stored in the recording extractor @@ -820,11 +820,11 @@ def order_channels_by_depth(recording, channel_ids=None, dimensions=("x", "y"), The input recording channel_ids : list/array or None If given, a subset of channels to order locations for - dimensions : str, tuple, or list, default : ('x', 'y') + dimensions : str, tuple, or list, default: ('x', 'y') If str, it needs to be 'x', 'y', 'z'. If tuple or list, it sorts the locations in two dimensions using lexsort. This approach is recommended since there is less ambiguity - flip : bool, default : False + flip : bool, default: False If flip is False then the order is bottom first (starting from tip of the probe). If flip is True then the order is upper first. diff --git a/src/spikeinterface/core/sortinganalyzer.py b/src/spikeinterface/core/sortinganalyzer.py index 838905f187..c541634e98 100644 --- a/src/spikeinterface/core/sortinganalyzer.py +++ b/src/spikeinterface/core/sortinganalyzer.py @@ -59,20 +59,20 @@ def create_sorting_analyzer( The sorting object recording : Recording The recording object - folder : str or Path or None, default : None + folder : str or Path or None, default: None The folder where waveforms are cached - format : "memory | "binary_folder" | "zarr", default : "memory" + format : "memory" | "binary_folder" | "zarr", default: "memory" The mode to store waveforms. If "folder", waveforms are stored on disk in the specified folder. The "folder" argument must be specified in case of mode "folder". If "memory" is used, the waveforms are stored in RAM. Use this option carefully! - sparse : bool, default : True + sparse : bool, default: True If True, then a sparsity mask is computed using the `estimate_sparsity()` function using a few spikes to get an estimate of dense templates to create a ChannelSparsity object. Then, the sparsity will be propagated to all ResultExtension that handle sparsity (like waveforms, pca, ...) You can control `estimate_sparsity()` : all extra arguments are propagated to it (including job_kwargs) - sparsity : ChannelSparsity or None, default : None + sparsity : ChannelSparsity or None, default: None The sparsity used to compute waveforms. If this is given, `sparse` is ignored. - return_scaled : bool, default : True + return_scaled : bool, default: True All extensions that play with traces will use this global return_scaled : "waveforms", "noise_levels", "templates". This prevents return_scaled from being different across extensions and giving a wrong snr, for instance. @@ -150,7 +150,7 @@ def load_sorting_analyzer(folder, load_extensions=True, format="auto"): ---------- folder : str or Path The folder / zarr folder where the waveform extractor is stored - load_extensions : bool, default : True + load_extensions : bool, default: True Load all extensions or not. format : "auto" | "binary_folder" | "zarr" The format of the folder.
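A minimal sketch of the analyzer creation/loading cycle documented above (assuming `recording` and `sorting` objects and a writable folder):

    from spikeinterface import create_sorting_analyzer, load_sorting_analyzer

    analyzer = create_sorting_analyzer(
        sorting, recording,
        format="binary_folder", folder="my_analyzer",
        sparse=True, return_scaled=True,
    )
    # later, in another session
    analyzer = load_sorting_analyzer("my_analyzer")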
@@ -685,7 +685,7 @@ def save_as(self, format="memory", folder=None) -> "SortingAnalyzer": ---------- folder : str or Path The output waveform folder - format : "binary_folder" | "zarr", default : "binary_folder" + format : "binary_folder" | "zarr", default: "binary_folder" The backend to use for saving the waveforms """ return self._save_or_select(format=format, folder=folder, unit_ids=None) @@ -849,9 +849,9 @@ def compute(self, input, save=True, extension_params=None, verbose=False, **kwar * a dict: compute several extensions. The keys are the extension names and the values are dictionaries with the extension parameters. * a list: compute several extensions. The list contains the extension names. Additional parameters can be passed with the extension_params argument. - save : bool, default : True + save : bool, default: True If True the extension is saved to disk (only if sorting analyzer format is not "memory") - extension_params : dict or None, default : None + extension_params : dict or None, default: None If input is a list, this parameter can be used to specify parameters for each extension. The extension_params keys must be included in the input list. **kwargs: @@ -914,7 +914,7 @@ def compute_one_extension(self, extension_name, save=True, verbose=False, **kwar extension_name : str The name of the extension. For instance "waveforms", "templates", ... - save : bool, default : True + save : bool, default: True If the extension can be saved then it is saved. If not, then the extension will only live in memory until the object is deleted. save=False is convenient to try some parameters without changing an already saved extension. @@ -982,7 +982,7 @@ def compute_several_extensions(self, extensions, save=True, verbose=False, **job ---------- extensions : dict Keys are extension_names and values are params. - save : bool, default : True + save : bool, default: True If the extension can be saved then it is saved. If not, then the extension will only live in memory until the object is deleted. save=False is convenient to try some parameters without changing an already saved extension. @@ -1343,7 +1343,7 @@ def get_extension_class(extension_name: str, auto_import=True): ---------- extension_name : str The extension name. - auto_import : bool, default : True + auto_import : bool, default: True Auto import the module if the extension class is not registered yet. Returns diff --git a/src/spikeinterface/core/sparsity.py b/src/spikeinterface/core/sparsity.py index 48cba4f0be..3dc8c050db 100644 --- a/src/spikeinterface/core/sparsity.py +++ b/src/spikeinterface/core/sparsity.py @@ -567,20 +567,20 @@ def estimate_sparsity( The recording sorting : BaseSorting The sorting
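The three input forms of `compute` described above look like this in practice (a sketch; extension names are the standard ones, parameter values are arbitrary):

    analyzer.compute("random_spikes")              # single extension
    analyzer.compute(
        ["waveforms", "templates"],                # list input
        extension_params={"waveforms": {"ms_before": 1.0, "ms_after": 2.5}},
    )
    analyzer.compute({"noise_levels": {}})         # dict input: name -> params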
Only "radius" or "best_channels" are implemented - peak_sign : "neg" | "pos" | "both", default : "neg" + peak_sign : "neg" | "pos" | "both", default: "neg" Sign of the template to compute best channels - radius_um : float, default : 100.0 + radius_um : float, default: 100.0 Used for "radius" method - num_channels : int, default : 5 + num_channels : int, default: 5 Used for "best_channels" method {} diff --git a/src/spikeinterface/core/template_tools.py b/src/spikeinterface/core/template_tools.py index 50fba73aef..1ba9372322 100644 --- a/src/spikeinterface/core/template_tools.py +++ b/src/spikeinterface/core/template_tools.py @@ -15,7 +15,7 @@ def get_dense_templates_array(one_object: Templates | SortingAnalyzer, return_sc ---------- one_object : Templates | SortingAnalyzer The Templates or SortingAnalyzer objects. If SortingAnalyzer, it needs the "templates" extension. - return_scaled : bool, default : True + return_scaled : bool, default: True If True, templates are scaled. Returns @@ -69,7 +69,7 @@ def get_template_amplitudes( A Templates or a SortingAnalyzer object peak_sign : "neg" | "pos" | "both" Sign of the template to find extremum channels - mode : "extremum" | "at_index" | "peak_to_peak", default : "at_index" + mode : "extremum" | "at_index" | "peak_to_peak", default: "at_index" Where the amplitude is computed * "extremum" : take the peak value (max or min depending on `peak_sign`) * "at_index" : take value at `nbefore` index @@ -135,12 +135,12 @@ def get_template_extremum_channel( A Templates or a SortingAnalyzer object peak_sign : "neg" | "pos" | "both" Sign of the template to find extremum channels - mode : "extremum" | "at_index" | "peak_to_peak", default : "at_index" + mode : "extremum" | "at_index" | "peak_to_peak", default: "at_index" Where the amplitude is computed * "extremum" : take the peak value (max or min depending on `peak_sign`) * "at_index" : take value at `nbefore` index * "peak_to_peak" : take the peak-to-peak amplitude - outputs : "id" | "index", default : "id" + outputs : "id" | "index", default: "id" * "id" : channel id * "index" : channel index @@ -248,7 +248,7 @@ def get_template_extremum_amplitude( A Templates or a SortingAnalyzer object peak_sign : "neg" | "pos" | "both" Sign of the template to find extremum channels - mode : "extremum" | "at_index" | "peak_to_peak", default : "at_index" + mode : "extremum" | "at_index" | "peak_to_peak", default: "at_index" Where the amplitude is computed * "extremum": take the peak value (max or min depending on `peak_sign`) * "at_index": take value at `nbefore` index diff --git a/src/spikeinterface/core/zarrextractors.py b/src/spikeinterface/core/zarrextractors.py index 7693935ef7..ee8021fa72 100644 --- a/src/spikeinterface/core/zarrextractors.py +++ b/src/spikeinterface/core/zarrextractors.py @@ -160,7 +160,7 @@ class ZarrSortingExtractor(BaseSorting): Path to the zarr root file storage_options : dict or None Storage options for zarr `store`. E.g., if "s3://" or "gcs://" they can provide authentication methods, etc. - zarr_group : str or None, default : None + zarr_group : str or None, default: None Optional zarr group path to load the sorting from. This can be used when the sorting is not stored at the root, but in sub group. Returns ------- @@ -294,7 +294,7 @@ def get_default_zarr_compressor(clevel: int = 5): Parameters ---------- - clevel : int, default : 5 + clevel : int, default: 5 Compression level (higher -> more compressed). Minimum 1, maximum 9. 
By default 5 @@ -459,17 +459,17 @@ def add_traces_to_zarr( The zarr group to add traces to dataset_paths : list List of paths to traces datasets in the zarr group - channel_chunk_size : int or None, default : None (chunking in time only) + channel_chunk_size : int or None, default: None (chunking in time only) Channels per chunk - dtype : dtype, default : None + dtype : dtype, default: None Type of the saved data - compressor : zarr compressor or None, default : None + compressor : zarr compressor or None, default: None Zarr compressor - filters : list, default : None + filters : list, default: None List of zarr filters - verbose : bool, default : False + verbose : bool, default: False If True, output is verbose (when chunks are used) - auto_cast_uint : bool, default : True + auto_cast_uint : bool, default: True If True, unsigned integers are automatically cast to int if the specified dtype is signed {} """ diff --git a/src/spikeinterface/curation/auto_merge.py b/src/spikeinterface/curation/auto_merge.py index d596fd7608..818b6a72b0 100644 --- a/src/spikeinterface/curation/auto_merge.py +++ b/src/spikeinterface/curation/auto_merge.py @@ -61,46 +61,46 @@ def get_potential_auto_merge( ---------- sorting_analyzer : SortingAnalyzer The SortingAnalyzer - minimum_spikes : int, default : 1000 + minimum_spikes : int, default: 1000 Minimum number of spikes for each unit to consider a potential merge. Enough spikes are needed to estimate the correlogram - maximum_distance_um : float, default : 150 + maximum_distance_um : float, default: 150 Maximum distance between units for considering a merge - peak_sign : "neg" | "pos" | "both", default : "neg" + peak_sign : "neg" | "pos" | "both", default: "neg" Peak sign used to estimate the maximum channel of a template - bin_ms : float, default : 0.25 + bin_ms : float, default: 0.25 Bin size in ms used for computing the correlogram - window_ms : float, default : 100 + window_ms : float, default: 100 Window size in ms used for computing the correlogram - corr_diff_thresh : float, default : 0.16 + corr_diff_thresh : float, default: 0.16 The threshold on the "correlogram distance metric" for considering a merge. It needs to be between 0 and 1 - template_diff_thresh : float, default : 0.25 + template_diff_thresh : float, default: 0.25 The threshold on the "template distance metric" for considering a merge. It needs to be between 0 and 1 template_metric : 'l1' The metric to be used when comparing templates. 
Default is l1 norm - censored_period_ms : float, default : 0.3 + censored_period_ms : float, default: 0.3 Used to compute the refractory period violations aka "contamination" - refractory_period_ms : float, default : 1 + refractory_period_ms : float, default: 1 Used to compute the refractory period violations aka "contamination" - sigma_smooth_ms : float, default : 0.6 + sigma_smooth_ms : float, default: 0.6 Parameter to smooth the correlogram estimation - contamination_threshold : float, default : 0.2 + contamination_threshold : float, default: 0.2 Threshold for not taking into account a unit when it is too contaminated - adaptative_window_threshold : : float, default : 0.5 + adaptative_window_threshold : float, default: 0.5 Parameter to detect the window size in correlogram estimation - censor_correlograms_ms : float, default : 0.15 + censor_correlograms_ms : float, default: 0.15 The period to censor on the auto and cross-correlograms - num_channels : int, default : 5 + num_channels : int, default: 5 Number of channels to use for template similarity computation - num_shift : int, default : 5 + num_shift : int, default: 5 Number of shifts in samples to be explored for template similarity computation - firing_contamination_balance : float, default : 1.5 + firing_contamination_balance : float, default: 1.5 Parameter to control the balance between firing rate and contamination in computing unit "quality score" - extra_outputs : bool, default : False + extra_outputs : bool, default: False If True, an additional dictionary (`outs`) with processed data is returned - steps : None or list of str, default : None + steps : None or list of str, default: None Which steps to run (gives flexibility to run just some steps). If None, all steps are done. Potential steps : "min_spikes", "remove_contaminated", "unit_positions", "correlogram", "template_similarity", @@ -410,9 +410,9 @@ def compute_templates_diff( The sorting object templates_array : np.array The templates array (num_units, num_samples, num_channels). - num_channels : int, default : 5 + num_channels : int, default: 5 Number of channels to use for template similarity computation - num_shift : int, default : 5 + num_shift : int, default: 5 Number of shifts in samples to be explored for template similarity computation pair_mask : None or boolean array A bool matrix of size (num_units, num_units) to select diff --git a/src/spikeinterface/curation/curationsorting.py b/src/spikeinterface/curation/curationsorting.py index 979b70679c..b031ab9146 100644 --- a/src/spikeinterface/curation/curationsorting.py +++ b/src/spikeinterface/curation/curationsorting.py @@ -20,7 +20,7 @@ class CurationSorting: ---------- parent_sorting : BaseSorting The sorting object - properties_policy : "keep" | "remove", default : "keep" + properties_policy : "keep" | "remove", default: "keep" Policy used to propagate properties after split and merge operation. If "keep" the properties will be passed to the new units (if the original units have the same value).
If "remove" the new units will have an empty value for all the properties @@ -174,7 +174,7 @@ def select_units(self, unit_ids, renamed_unit_ids=None): ---------- unit_ids : list[str|int] List of unit ids to select - renamed_unit_ids : list or None, default : None + renamed_unit_ids : list or None, default: None List of new unit ids to rename the selected units """ new_sorting = self._sorting_stages[self._sorting_stages_i].select_units(unit_ids, renamed_unit_ids) diff --git a/src/spikeinterface/curation/mergeunitssorting.py b/src/spikeinterface/curation/mergeunitssorting.py index 7c2c2ada33..3bf1d1d43a 100644 --- a/src/spikeinterface/curation/mergeunitssorting.py +++ b/src/spikeinterface/curation/mergeunitssorting.py @@ -19,7 +19,7 @@ class MergeUnitsSorting(BaseSorting): but it can also have more (merge multiple units at once). new_unit_ids : None or list A new unit_ids for merged units. If given, it needs to have the same length as `units_to_merge` - properties_policy : "keep" | "remove", default : "keep" + properties_policy : "keep" | "remove", default: "keep" Policy used to propagate properties. If "keep" the properties will be passed to the new units (if the units_to_merge have the same value). If "remove" the new units will have an empty value for all the properties of the new unit. diff --git a/src/spikeinterface/curation/remove_duplicated_spikes.py b/src/spikeinterface/curation/remove_duplicated_spikes.py index 00d7b5d3c3..508ac8c8cc 100644 --- a/src/spikeinterface/curation/remove_duplicated_spikes.py +++ b/src/spikeinterface/curation/remove_duplicated_spikes.py @@ -18,7 +18,7 @@ class RemoveDuplicatedSpikesSorting(BaseSorting): The parent sorting. censored_period_ms : float The censored period to consider 2 spikes to be duplicated (in ms). - method : "keep_first" | "keep_last" | "keep_first_iterative" | "keep_last_iterative" | "random", default : "keep_first" + method : "keep_first" | "keep_last" | "keep_first_iterative" | "keep_last_iterative" | "random", default: "keep_first" Method used to remove the duplicated spikes. If method = "random", will randomly choose to remove the first or last spike. If method = "keep_first", for each ISI violation, will remove the second spike. diff --git a/src/spikeinterface/curation/remove_redundant.py b/src/spikeinterface/curation/remove_redundant.py index 5ceac0d849..874552f767 100644 --- a/src/spikeinterface/curation/remove_redundant.py +++ b/src/spikeinterface/curation/remove_redundant.py @@ -37,16 +37,16 @@ def remove_redundant_units( If SortingAnalyzer, the spike trains can be optionally realigned using the peak shift in the template to improve the matching procedure. If BaseSorting, the spike trains are not aligned. 
- align : bool, default : False + align : bool, default: False If True, spike trains are aligned (if a SortingAnalyzer is used) - delta_time : float, default : 0.4 + delta_time : float, default: 0.4 The time in ms to consider matching spikes - agreement_threshold : float, default : 0.2 + agreement_threshold : float, default: 0.2 Threshold on the agreement scores to flag possible redundant/duplicate units - duplicate_threshold : float, default : 0.8 + duplicate_threshold : float, default: 0.8 Final threshold on the portion of coincident events over the number of spikes above which the unit is removed - remove_strategy : "minimum_shift" | "highest_amplitude" | "max_spikes", default : "minimum_shift" + remove_strategy : "minimum_shift" | "highest_amplitude" | "max_spikes", default: "minimum_shift" Which strategy to remove one of the two duplicated units: * "minimum_shift" : keep the unit with best peak alignment (minimum shift) @@ -54,9 +54,9 @@ def remove_redundant_units( * "highest_amplitude" : keep the unit with the best amplitude on unshifted max. * "max_spikes" : keep the unit with more spikes - peak_sign : "neg" | "pos" | "both", default : "neg" + peak_sign : "neg" | "pos" | "both", default: "neg" Used when remove_strategy="highest_amplitude" - extra_outputs : bool, default : False + extra_outputs : bool, default: False If True, will return the redundant pairs. Returns @@ -149,11 +149,11 @@ def find_redundant_units(sorting, delta_time: float = 0.4, agreement_threshold=0 ---------- sorting : BaseSorting The input sorting object - delta_time : float, default : 0.4 + delta_time : float, default: 0.4 The time in ms to consider matching spikes - agreement_threshold : float, default : 0.2 + agreement_threshold : float, default: 0.2 Threshold on the agreement scores to flag possible redundant/duplicate units - duplicate_threshold : float, default : 0.8 + duplicate_threshold : float, default: 0.8 Final threshold on the portion of coincident events over the number of spikes above which the unit is flagged as duplicate/redundant diff --git a/src/spikeinterface/curation/splitunitsorting.py b/src/spikeinterface/curation/splitunitsorting.py index 4163d396e9..8fc6afcde8 100644 --- a/src/spikeinterface/curation/splitunitsorting.py +++ b/src/spikeinterface/curation/splitunitsorting.py @@ -24,7 +24,7 @@ class SplitUnitSorting(BaseSorting): If the sorting has only one segment, indices_list can be a single array new_unit_ids : int Unit ids of the new units to be created - properties_policy : "keep" | "remove", default : "keep" + properties_policy : "keep" | "remove", default: "keep" Policy used to propagate properties. If "keep" the properties will be passed to the new units (if the units_to_merge have the same value). 
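And a sketch of the redundant-unit removal whose parameters are documented above (assuming a `sorting_analyzer`, so that spike trains can be aligned):

    from spikeinterface.curation import remove_redundant_units

    clean_sorting = remove_redundant_units(
        sorting_analyzer,
        align=True,
        agreement_threshold=0.2,
        duplicate_threshold=0.8,
        remove_strategy="minimum_shift",
    )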
If "remove" the new units will have an empty value for all the properties of the new unit diff --git a/src/spikeinterface/exporters/report.py b/src/spikeinterface/exporters/report.py index 349014c14a..95d3713065 100644 --- a/src/spikeinterface/exporters/report.py +++ b/src/spikeinterface/exporters/report.py @@ -32,15 +32,15 @@ def export_report( A SortingAnalyzer object output_folder : str The output folder where the report files are saved - remove_if_exists : bool, default : False + remove_if_exists : bool, default: False If True and the output folder exists, it is removed - format : str, default : "png" + format : str, default: "png" The output figure format (any format handled by matplotlib) - peak_sign : "neg" or "pos", default : "neg" + peak_sign : "neg" or "pos", default: "neg" used to compute amplitudes and metrics - show_figures : bool, default : False + show_figures : bool, default: False If True, figures are shown. If False, figures are closed after saving - force_computation : bool, default : False + force_computation : bool, default: False Force or not some heavy computaion before exporting {} """ diff --git a/src/spikeinterface/exporters/to_phy.py b/src/spikeinterface/exporters/to_phy.py index fbdb91ca7e..551431fe09 100644 --- a/src/spikeinterface/exporters/to_phy.py +++ b/src/spikeinterface/exporters/to_phy.py @@ -47,25 +47,25 @@ def export_to_phy( A SortingAnalyzer object output_folder : str | Path The output folder where the phy template-gui files are saved - compute_pc_features : bool, default : True + compute_pc_features : bool, default: True If True, pc features are computed - compute_amplitudes : bool, default : True + compute_amplitudes : bool, default: True If True, waveforms amplitudes are computed - sparsity : ChannelSparsity or None, default : None + sparsity : ChannelSparsity or None, default: None The sparsity object - copy_binary : bool, default : True + copy_binary : bool, default: True If True, the recording is copied and saved in the phy "output_folder" - remove_if_exists : bool, default : False + remove_if_exists : bool, default: False If True and "output_folder" exists, it is removed and overwritten - peak_sign : "neg" | "pos" | "both", default : "neg" + peak_sign : "neg" | "pos" | "both", default: "neg" Used by compute_spike_amplitudes - template_mode : str, default : "average" + template_mode : str, default: "average" Parameter "mode" to be given to SortingAnalyzer.get_template() - dtype : dtype or None, default : None + dtype : dtype or None, default: None Dtype to save binary data - verbose : bool, default : True + verbose : bool, default: True If True, output is verbose - use_relative_path : bool, default : False + use_relative_path : bool, default: False If True and `copy_binary=True` saves the binary file `dat_path` in the `params.py` relative to `output_folder` (ie `dat_path=r"recording.dat"`). If `copy_binary=False`, then uses a path relative to the `output_folder` If False, uses an absolute path in the `params.py` (ie `dat_path=r"path/to/the/recording.dat"`) {} diff --git a/src/spikeinterface/extractors/cbin_ibl.py b/src/spikeinterface/extractors/cbin_ibl.py index 76c319a741..e5ff8ed371 100644 --- a/src/spikeinterface/extractors/cbin_ibl.py +++ b/src/spikeinterface/extractors/cbin_ibl.py @@ -24,10 +24,10 @@ class CompressedBinaryIblExtractor(BaseRecording): ---------- folder_path : str or Path Path to ibl folder. - load_sync_channel : bool, default : False + load_sync_channel : bool, default: False Load or not the last channel (sync). 
If not, then the probe is loaded. - stream_name : str, default : "ap". + stream_name : str, default: "ap". Whether to load AP or LFP band, one of "ap" or "lp". diff --git a/src/spikeinterface/extractors/herdingspikesextractors.py b/src/spikeinterface/extractors/herdingspikesextractors.py index 7749968257..139d51d62e 100644 --- a/src/spikeinterface/extractors/herdingspikesextractors.py +++ b/src/spikeinterface/extractors/herdingspikesextractors.py @@ -22,7 +22,7 @@ class HerdingspikesSortingExtractor(BaseSorting): ---------- folder_path : str or Path Path to the ALF folder. - load_unit_info : bool, default : True + load_unit_info : bool, default: True Whether to load the unit info from the file. Returns diff --git a/src/spikeinterface/extractors/klustaextractors.py b/src/spikeinterface/extractors/klustaextractors.py index 3b6685c30c..dc01b3b9eb 100644 --- a/src/spikeinterface/extractors/klustaextractors.py +++ b/src/spikeinterface/extractors/klustaextractors.py @@ -34,7 +34,7 @@ class KlustaSortingExtractor(BaseSorting): ---------- file_or_folder_path : str or Path Path to the ALF folder. - exclude_cluster_groups : list or str, default : None + exclude_cluster_groups : list or str, default: None Cluster groups to exclude (e.g. "noise" or ["noise", "mua"]). Returns diff --git a/src/spikeinterface/extractors/mdaextractors.py b/src/spikeinterface/extractors/mdaextractors.py index 7fa089d5b6..0ecad27994 100644 --- a/src/spikeinterface/extractors/mdaextractors.py +++ b/src/spikeinterface/extractors/mdaextractors.py @@ -23,11 +23,11 @@ class MdaRecordingExtractor(BaseRecording): ---------- folder_path : str or Path Path to the MDA folder. - raw_fname : str, default : "raw.mda" + raw_fname : str, default: "raw.mda" File name of raw file - params_fname : str, default : "params.json" + params_fname : str, default: "params.json" File name of params file - geom_fname : str, default : "geom.csv" + geom_fname : str, default: "geom.csv" File name of geom file Returns @@ -89,13 +89,13 @@ def write_recording( params : dictionary Dictionary with optional parameters to save metadata. Sampling frequency is appended to this dictionary. - raw_fname : str, default : "raw.mda" + raw_fname : str, default: "raw.mda" File name of raw file - params_fname : str, default : "params.json" + params_fname : str, default: "params.json" File name of params file - geom_fname : str, default : "geom.csv" + geom_fname : str, default: "geom.csv" File name of geom file - dtype : dtype or None, default : None + dtype : dtype or None, default: None Data type to be used. If None, dtype is the same as the recording traces. **job_kwargs: Used by job_tools modules to set: diff --git a/src/spikeinterface/extractors/neoextractors/alphaomega.py b/src/spikeinterface/extractors/neoextractors/alphaomega.py index 1e0876ab3e..5c8e58d3a5 100644 --- a/src/spikeinterface/extractors/neoextractors/alphaomega.py +++ b/src/spikeinterface/extractors/neoextractors/alphaomega.py @@ -17,13 +17,13 @@ class AlphaOmegaRecordingExtractor(NeoBaseRecordingExtractor): ---------- folder_path : str or Path-like The folder path to the AlphaOmega recordings. - lsx_files : list of strings or None, default : None + lsx_files : list of strings or None, default: None A list of listings files that refer to mpx files to load. - stream_id : {"RAW", "LFP", "SPK", "ACC", "AI", "UD"}, default : "RAW" + stream_id : {"RAW", "LFP", "SPK", "ACC", "AI", "UD"}, default: "RAW" If there are several streams, specify the stream id you want to load.
- stream_name : str, default : None + stream_name : str, default: None If there are several streams, specify the stream name you want to load. - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/axona.py b/src/spikeinterface/extractors/neoextractors/axona.py index 455f3ca2d6..71e1277946 100644 --- a/src/spikeinterface/extractors/neoextractors/axona.py +++ b/src/spikeinterface/extractors/neoextractors/axona.py @@ -17,7 +17,7 @@ class AxonaRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path : str The file path to load the recordings from. - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/biocam.py b/src/spikeinterface/extractors/neoextractors/biocam.py index f724834db5..96d4dd25a6 100644 --- a/src/spikeinterface/extractors/neoextractors/biocam.py +++ b/src/spikeinterface/extractors/neoextractors/biocam.py @@ -19,15 +19,15 @@ class BiocamRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path : str The file path to load the recordings from. - mea_pitch : float, default : None + mea_pitch : float, default: None The inter-electrode distance (pitch) between electrodes. - electrode_width : float, default : None + electrode_width : float, default: None Width of the electrodes in um. - stream_id : str, default : None + stream_id : str, default: None If there are several streams, specify the stream id you want to load. - stream_name : str, default : None + stream_name : str, default: None If there are several streams, specify the stream name you want to load. - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/blackrock.py b/src/spikeinterface/extractors/neoextractors/blackrock.py index ac50805e25..c3a4c5ad31 100644 --- a/src/spikeinterface/extractors/neoextractors/blackrock.py +++ b/src/spikeinterface/extractors/neoextractors/blackrock.py @@ -21,11 +21,11 @@ class BlackrockRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path : str The file path to load the recordings from. - stream_id : str, default : None + stream_id : str, default: None If there are several streams, specify the stream id you want to load. - stream_name : str, default : None + stream_name : str, default: None If there are several streams, specify the stream name you want to load. - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo. """ @@ -77,13 +77,13 @@ class BlackrockSortingExtractor(NeoBaseSortingExtractor): ---------- file_path : str The file path to load the recordings from. - sampling_frequency : float, default : None + sampling_frequency : float, default: None The sampling frequency for the sorting extractor. When the signal data is available (.nsx) those files will be used to extract the frequency automatically. Otherwise, the sampling frequency needs to be specified for this extractor to be initialized. - stream_id : str, default : None + stream_id : str, default: None Used to extract information about the sampling frequency and t_start from the analog signal if provided.
- stream_name : str, default : None + stream_name : str, default: None Used to extract information about the sampling frequency and t_start from the analog signal if provided. """ diff --git a/src/spikeinterface/extractors/neoextractors/ced.py b/src/spikeinterface/extractors/neoextractors/ced.py index ca090e596e..401c927fc7 100644 --- a/src/spikeinterface/extractors/neoextractors/ced.py +++ b/src/spikeinterface/extractors/neoextractors/ced.py @@ -19,13 +19,13 @@ class CedRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path : str The file path to the smr or smrx file. - stream_id : str, default : None + stream_id : str, default: None If there are several streams, specify the stream id you want to load. - stream_name : str, default : None + stream_name : str, default: None If there are several streams, specify the stream name you want to load. - block_index : int, default : None + block_index : int, default: None If there are several blocks, specify the block index you want to load. - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/intan.py b/src/spikeinterface/extractors/neoextractors/intan.py index 421e676679..b8760ebc88 100644 --- a/src/spikeinterface/extractors/neoextractors/intan.py +++ b/src/spikeinterface/extractors/neoextractors/intan.py @@ -17,11 +17,11 @@ class IntanRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path : str The file path to load the recordings from. - stream_id : str, default : None + stream_id : str, default: None If there are several streams, specify the stream id you want to load. - stream_name : str, default : None + stream_name : str, default: None If there are several streams, specify the stream name you want to load. - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/maxwell.py b/src/spikeinterface/extractors/neoextractors/maxwell.py index 5ca353cd9d..3888b6d5a0 100644 --- a/src/spikeinterface/extractors/neoextractors/maxwell.py +++ b/src/spikeinterface/extractors/neoextractors/maxwell.py @@ -22,18 +22,18 @@ class MaxwellRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path : str The file path to the maxwell h5 file. - stream_id : str, default : None + stream_id : str, default: None If there are several streams, specify the stream id you want to load. For MaxTwo when there are several wells at the same time you need to specify stream_id='well000', 'well001', etc. - stream_name : str, default : None + stream_name : str, default: None If there are several streams, specify the stream name you want to load. - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo. - rec_name : str, default : None + rec_name : str, default: None When the file contains several recordings you need to specify the one you want to extract (rec_name='rec0000'). - install_maxwell_plugin : bool, default : False + install_maxwell_plugin : bool, default: False If True, install the maxwell plugin for neo.
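Examples
--------
An illustrative usage sketch (the h5 file name below is hypothetical; rec_name follows the 'rec0000' convention mentioned above):

>>> from spikeinterface.extractors import MaxwellRecordingExtractor
>>> # rec_name selects one recording when the file contains several
>>> recording = MaxwellRecordingExtractor("maxwell_data.h5", rec_name="rec0000")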
""" diff --git a/src/spikeinterface/extractors/neoextractors/mcsraw.py b/src/spikeinterface/extractors/neoextractors/mcsraw.py index 2220ef41c8..0cbd9263ba 100644 --- a/src/spikeinterface/extractors/neoextractors/mcsraw.py +++ b/src/spikeinterface/extractors/neoextractors/mcsraw.py @@ -20,13 +20,13 @@ class MCSRawRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path : str The file path to load the recordings from. - stream_id : str, default : None + stream_id : str, default: None If there are several streams, specify the stream id you want to load. - stream_name : str, default : None + stream_name : str, default: None If there are several streams, specify the stream name you want to load. - block_index : int, default : None + block_index : int, default: None If there are several blocks, specify the block index you want to load. - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/mearec.py b/src/spikeinterface/extractors/neoextractors/mearec.py index 21e51eb0fd..76f0b29f54 100644 --- a/src/spikeinterface/extractors/neoextractors/mearec.py +++ b/src/spikeinterface/extractors/neoextractors/mearec.py @@ -36,7 +36,7 @@ class MEArecRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path : str The file path to load the recordings from. - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/neuralynx.py b/src/spikeinterface/extractors/neoextractors/neuralynx.py index fa35b0396e..25b6bb5b61 100644 --- a/src/spikeinterface/extractors/neoextractors/neuralynx.py +++ b/src/spikeinterface/extractors/neoextractors/neuralynx.py @@ -20,16 +20,16 @@ class NeuralynxRecordingExtractor(NeoBaseRecordingExtractor): ---------- folder_path : str The file path to load the recordings from. - stream_id : str, default : None + stream_id : str, default: None If there are several streams, specify the stream id you want to load. - stream_name : str, default : None + stream_name : str, default: None If there are several streams, specify the stream name you want to load. - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo. - exlude_filename : list[str], default : None + exlude_filename : list[str], default: None List of filename to exclude from the loading. For example, use `exclude_filename=["events.nev"]` to skip loading the event file. - strict_gap_mode : bool, default : False + strict_gap_mode : bool, default: False See neo documentation. Detect gaps using strict mode or not. * strict_gap_mode = True then a gap is consider when timstamp difference between two @@ -83,9 +83,9 @@ class NeuralynxSortingExtractor(NeoBaseSortingExtractor): sampling_frequency : float The sampling frequency for the spiking channels. When the signal data is available (.ncs) those files will be used to extract the frequency. Otherwise, the sampling frequency needs to be specified for this extractor. - stream_id : str, default : None + stream_id : str, default: None Used to extract information about the sampling frequency and t_start from the analog signal if provided. - stream_name : str, default : None + stream_name : str, default: None Used to extract information about the sampling frequency and t_start from the analog signal if provided. 
""" diff --git a/src/spikeinterface/extractors/neoextractors/neuroexplorer.py b/src/spikeinterface/extractors/neoextractors/neuroexplorer.py index 18f0dfcf5f..49784418e1 100644 --- a/src/spikeinterface/extractors/neoextractors/neuroexplorer.py +++ b/src/spikeinterface/extractors/neoextractors/neuroexplorer.py @@ -38,12 +38,12 @@ class NeuroExplorerRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path : str The file path to load the recordings from. - stream_id : str, default : None + stream_id : str, default: None If there are several streams, specify the stream id you want to load. For this neo reader streams are defined by their sampling frequency. - stream_name : str, default : None + stream_name : str, default: None If there are several streams, specify the stream name you want to load. - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/neuroscope.py b/src/spikeinterface/extractors/neoextractors/neuroscope.py index b386122266..2f6502b1c8 100644 --- a/src/spikeinterface/extractors/neoextractors/neuroscope.py +++ b/src/spikeinterface/extractors/neoextractors/neuroscope.py @@ -27,13 +27,13 @@ class NeuroScopeRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path : str The file path to the binary container usually a .dat, .lfp, .eeg extension. - xml_file_path : str, default : None + xml_file_path : str, default: None The path to the xml file. If None, the xml file is assumed to have the same name as the binary file. - stream_id : str, default : None + stream_id : str, default: None If there are several streams, specify the stream id you want to load. - stream_name : str, default : None + stream_name : str, default: None If there are several streams, specify the stream name you want to load. - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo. """ @@ -94,12 +94,12 @@ class NeuroScopeSortingExtractor(BaseSorting): clufile_path : PathType Optional. Path to a particular .clu text file. If given, only the single .clu file (and the respective .res file) are loaded - keep_mua_units : bool, default : True + keep_mua_units : bool, default: True Optional. Whether or not to return sorted spikes from multi-unit activity exclude_shanks : list Optional. List of indices to ignore. The set of all possible indices is chosen by default, extracted as the final integer of all the .res.%i and .clu.%i pairs. - xml_file_path : PathType, default : None + xml_file_path : PathType, default: None Path to the .xml file referenced by this sorting. """ @@ -306,14 +306,14 @@ def read_neuroscope( The xml file. stream_id : str or None The stream id to load. If None, the first stream is loaded - keep_mua_units : bool, default : False + keep_mua_units : bool, default: False Optional. Whether or not to return sorted spikes from multi-unit activity exclude_shanks : list Optional. List of indices to ignore. The set of all possible indices is chosen by default, extracted as the final integer of all the .res. % i and .clu. % i pairs. 
- load_recording : bool, default : True + load_recording : bool, default: True If True, the recording is loaded - load_sorting : bool, default : False + load_sorting : bool, default: False If True, the sorting is loaded """ outputs = () diff --git a/src/spikeinterface/extractors/neoextractors/nix.py b/src/spikeinterface/extractors/neoextractors/nix.py index 31cce32a41..00e5f8bfc1 100644 --- a/src/spikeinterface/extractors/neoextractors/nix.py +++ b/src/spikeinterface/extractors/neoextractors/nix.py @@ -17,13 +17,13 @@ class NixRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path : str The file path to load the recordings from. - stream_id : str, default : None + stream_id : str, default: None If there are several streams, specify the stream id you want to load. - stream_name : str, default : None + stream_name : str, default: None If there are several streams, specify the stream name you want to load. - block_index : int, default : None + block_index : int, default: None If there are several blocks, specify the block index you want to load. - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/openephys.py b/src/spikeinterface/extractors/neoextractors/openephys.py index 80ff098a00..f3363b9013 100644 --- a/src/spikeinterface/extractors/neoextractors/openephys.py +++ b/src/spikeinterface/extractors/neoextractors/openephys.py @@ -51,13 +51,13 @@ class OpenEphysLegacyRecordingExtractor(NeoBaseRecordingExtractor): ---------- folder_path : str The folder path to load the recordings from - stream_id : str, default : None + stream_id : str, default: None If there are several streams, specify the stream id you want to load - stream_name : str, default : None + stream_name : str, default: None If there are several streams, specify the stream name you want to load - block_index : int, default : None + block_index : int, default: None If there are several blocks (experiments), specify the block index you want to load - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo ignore_timestamps_errors : None Deprecated keyword argument. This is now ignored. @@ -116,24 +116,24 @@ class OpenEphysBinaryRecordingExtractor(NeoBaseRecordingExtractor): ---------- folder_path : str The folder path to the root folder (containing the record node folders) - load_sync_channel : bool, default : False + load_sync_channel : bool, default: False If False (default) and a SYNC channel is present (e.g. Neuropixels), this is not loaded. If True, the SYNC channel is loaded and can be accessed in the analog signals. - load_sync_timestamps : bool, default : False + load_sync_timestamps : bool, default: False If True, the synchronized_timestamps are loaded and set as times to the recording. If False (default), only the t_start and sampling rate are set, and timestamps are assumed to be uniform and linearly increasing - experiment_names : str, list, or None, default : None + experiment_names : str, list, or None, default: None If multiple experiments are available, this argument allows users to select one or more experiments. If None, all experiments are loaded as blocks. E.g.
`experiment_names="experiment2"`, `experiment_names=["experiment1", "experiment2"]` - stream_id : str, default : None + stream_id : str, default: None If there are several streams, specify the stream id you want to load - stream_name : str, default : None + stream_name : str, default: None If there are several streams, specify the stream name you want to load - block_index : int, default : None + block_index : int, default: None If there are several blocks (experiments), specify the block index you want to load - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo """ @@ -309,29 +309,29 @@ def read_openephys(folder_path, **kwargs): ---------- folder_path : str or Path Path to openephys folder - stream_id : str, default : None + stream_id : str, default: None If there are several streams, specify the stream id you want to load - stream_name : str, default : None + stream_name : str, default: None If there are several streams, specify the stream name you want to load - block_index : int, default : None + block_index : int, default: None If there are several blocks (experiments), specify the block index you want to load - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo - load_sync_channel : bool, default : False + load_sync_channel : bool, default: False If False (default) and a SYNC channel is present (e.g. Neuropixels), this is not loaded. If True, the SYNC channel is loaded and can be accessed in the analog signals. For open ephys binary format only - load_sync_timestamps : bool, default : False + load_sync_timestamps : bool, default: False If True, the synchronized_timestamps are loaded and set as times to the recording. If False (default), only the t_start and sampling rate are set, and timestamps are assumed to be uniform and linearly increasing. For open ephys binary format only - experiment_names : str, list, or None, default : None + experiment_names : str, list, or None, default: None If multiple experiments are available, this argument allows users to select one or more experiments. If None, all experiments are loaded as blocks. E.g. `experiment_names="experiment2"`, `experiment_names=["experiment1", "experiment2"]` For open ephys binary format only - ignore_timestamps_errors : bool, default : False + ignore_timestamps_errors : bool, default: False Ignore the discontinuous timestamps errors in neo For open ephys legacy format only @@ -359,7 +359,7 @@ def read_openephys_event(folder_path, block_index=None): ---------- folder_path : str or Path Path to openephys folder - block_index : int, default : None + block_index : int, default: None If there are several blocks (experiments), specify the block index you want to load. Returns diff --git a/src/spikeinterface/extractors/neoextractors/plexon.py b/src/spikeinterface/extractors/neoextractors/plexon.py index 08f2f8afe8..cf08778ffa 100644 --- a/src/spikeinterface/extractors/neoextractors/plexon.py +++ b/src/spikeinterface/extractors/neoextractors/plexon.py @@ -17,11 +17,11 @@ class PlexonRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path : str The file path to load the recordings from. - stream_id : str, default : None + stream_id : str, default: None If there are several streams, specify the stream id you want to load. - stream_name : str, default : None + stream_name : str, default: None If there are several streams, specify the stream name you want to load.
- all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/plexon2.py b/src/spikeinterface/extractors/neoextractors/plexon2.py index fc325d14e7..fe24ba6f46 100644 --- a/src/spikeinterface/extractors/neoextractors/plexon2.py +++ b/src/spikeinterface/extractors/neoextractors/plexon2.py @@ -15,11 +15,11 @@ class Plexon2RecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path : str The file path to load the recordings from. - stream_id : str, default : None + stream_id : str, default: None If there are several streams, specify the stream id you want to load. - stream_name : str, default : None + stream_name : str, default: None If there are several streams, specify the stream name you want to load. - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo. """ @@ -50,7 +50,7 @@ class Plexon2SortingExtractor(NeoBaseSortingExtractor): ---------- file_path : str The file path to load the recordings from. - sampling_frequency : float, default : None + sampling_frequency : float, default: None The sampling frequency of the sorting (required for multiple streams with different sampling frequencies). """ diff --git a/src/spikeinterface/extractors/neoextractors/spike2.py b/src/spikeinterface/extractors/neoextractors/spike2.py index 9f22a57ed1..1bd0351553 100644 --- a/src/spikeinterface/extractors/neoextractors/spike2.py +++ b/src/spikeinterface/extractors/neoextractors/spike2.py @@ -18,11 +18,11 @@ class Spike2RecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path : str The file path to load the recordings from. - stream_id : str, default : None + stream_id : str, default: None If there are several streams, specify the stream id you want to load. - stream_name : str, default : None + stream_name : str, default: None If there are several streams, specify the stream name you want to load. - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/spikegadgets.py b/src/spikeinterface/extractors/neoextractors/spikegadgets.py index 4bcf5eb1e9..6ce2ebe792 100644 --- a/src/spikeinterface/extractors/neoextractors/spikegadgets.py +++ b/src/spikeinterface/extractors/neoextractors/spikegadgets.py @@ -20,11 +20,11 @@ class SpikeGadgetsRecordingExtractor(NeoBaseRecordingExtractor): ---------- file_path : str The file path to load the recordings from. - stream_id : str or None, default : None + stream_id : str or None, default: None If there are several streams, specify the stream id you want to load. - stream_name : str or None, default : None + stream_name : str or None, default: None If there are several streams, specify the stream name you want to load. - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/spikeglx.py b/src/spikeinterface/extractors/neoextractors/spikeglx.py index 4d594c548a..25e1432297 100644 --- a/src/spikeinterface/extractors/neoextractors/spikeglx.py +++ b/src/spikeinterface/extractors/neoextractors/spikeglx.py @@ -32,15 +32,15 @@ class SpikeGLXRecordingExtractor(NeoBaseRecordingExtractor): ---------- folder_path : str The folder path to load the recordings from. 
- load_sync_channel : bool default : False + load_sync_channel : bool, default: False Whether or not to load the last channel in the stream, which is typically used for synchronization. If True, then the probe is not loaded. - stream_id : str or None, default : None + stream_id : str or None, default: None If there are several streams, specify the stream id you want to load. For example, "imec0.ap", "nidq", or "imec0.lf". - stream_name : str or None, default : None + stream_name : str or None, default: None If there are several streams, specify the stream name you want to load. - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/neoextractors/tdt.py b/src/spikeinterface/extractors/neoextractors/tdt.py index 3834f14d4b..146f6a4b4c 100644 --- a/src/spikeinterface/extractors/neoextractors/tdt.py +++ b/src/spikeinterface/extractors/neoextractors/tdt.py @@ -17,11 +17,11 @@ class TdtRecordingExtractor(NeoBaseRecordingExtractor): ---------- folder_path : str The folder path to the tdt folder. - stream_id : str or None, default : None + stream_id : str or None, default: None If there are several streams, specify the stream id you want to load. - stream_name : str or None, default : None + stream_name : str or None, default: None If there are several streams, specify the stream name you want to load. - all_annotations : bool, default : False + all_annotations : bool, default: False Load exhaustively all annotations from neo. """ diff --git a/src/spikeinterface/extractors/nwbextractors.py b/src/spikeinterface/extractors/nwbextractors.py index ace2411482..4729ccea86 100644 --- a/src/spikeinterface/extractors/nwbextractors.py +++ b/src/spikeinterface/extractors/nwbextractors.py @@ -111,12 +111,12 @@ def read_nwbfile( The path to the NWB file. Either provide this or file. file : file-like object or None The file-like object to read from. Either provide this or file_path. - stream_mode : "fsspec" | "remfile" | None, default : None + stream_mode : "fsspec" | "remfile" | None, default: None The streaming mode to use. If None it assumes the file is on the local disk. - cache : bool, default : False + cache : bool, default: False If True, the file is cached in the file passed to stream_cache_path. If False, the file is not cached. - stream_cache_path : str or None, default : None + stream_cache_path : str or None, default: None The path to the cache storage; when None, it uses a temporary folder. Returns @@ -170,7 +170,7 @@ def _retrieve_electrical_series_pynwb( ---------- nwbfile : NWBFile The NWBFile object from which to extract the ElectricalSeries. - electrical_series_path : str, default : None + electrical_series_path : str, default: None The name of the ElectricalSeries to extract. If not specified, it will return the first found ElectricalSeries if there's only one; otherwise, it raises an error. @@ -224,7 +224,7 @@ def _retrieve_unit_table_pynwb(nwbfile: "NWBFile", unit_table_path: Optional[str ---------- nwbfile : NWBFile The NWBFile object from which to extract the Units. - unit_table_path : str, default : None + unit_table_path : str, default: None The path of the Units to extract. If not specified, it will return the first found Units if there's only one; otherwise, it raises an error. @@ -409,9 +409,9 @@ class NwbRecordingExtractor(BaseRecording): file_path : str, Path, or None Path to the NWB file or an s3 URL. Use this parameter to specify the file location if not using the `file` parameter.
Use this parameter to specify the file location if not using the `file` parameter. - electrical_series_name : str or None, default : None + electrical_series_name : str or None, default: None Deprecated, use `electrical_series_path` instead. - electrical_series_path : str or None, default : None + electrical_series_path : str or None, default: None The name of the ElectricalSeries object within the NWB file. This parameter is crucial when the NWB file contains multiple ElectricalSeries objects. It helps in identifying which specific series to extract data from. If there is only one ElectricalSeries and @@ -419,30 +419,30 @@ class NwbRecordingExtractor(BaseRecording): If multiple ElectricalSeries are present and this parameter is not set, an error is raised. The `electrical_series_path` corresponds to the path within the NWB file, e.g., 'acquisition/MyElectricalSeries`. - load_time_vector : bool, default : False + load_time_vector : bool, default: False If set to True, the time vector is also loaded into the recording object. Useful for cases where precise timing information is required. - samples_for_rate_estimation : int, default : 1000 + samples_for_rate_estimation : int, default: 1000 The number of timestamp samples used for estimating the sampling rate. This is relevant when the 'rate' attribute is not available in the ElectricalSeries. - stream_mode : "fsspec" | "remfile" | "zarr" | None, default : None + stream_mode : "fsspec" | "remfile" | "zarr" | None, default: None Determines the streaming mode for reading the file. Use this for optimized reading from different sources, such as local disk or remote servers. - load_channel_properties : bool, default : True + load_channel_properties : bool, default: True If True, all the channel properties are loaded from the NWB file and stored as properties. For streaming purposes, it can be useful to set this to False to speed up streaming. - file : file-like object or None, default : None + file : file-like object or None, default: None A file-like object representing the NWB file. Use this parameter if you have an in-memory representation of the NWB file instead of a file path. - cache : bool, default : False + cache : bool, default: False Indicates whether to cache the file locally when using streaming. Caching can improve performance for remote files. - stream_cache_path : str, Path, or None, default : None + stream_cache_path : str, Path, or None, default: None Specifies the local path for caching the file. Relevant only if `cache` is True. storage_options : dict | None = None, These are the additional kwargs (e.g. AWS credentials) that are passed to the zarr.open convenience function. This is only used on the "zarr" stream_mode. - use_pynwb : bool, default : False + use_pynwb : bool, default: False Uses the pynwb library to read the NWB file. Setting this to False, the default, uses h5py to read the file. Using h5py can improve performance by bypassing some of the PyNWB validations. @@ -963,22 +963,22 @@ class NwbSortingExtractor(BaseSorting): ---------- file_path : str or Path Path to NWB file. - electrical_series_path : str or None, default : None + electrical_series_path : str or None, default: None The name of the ElectricalSeries (if multiple ElectricalSeries are present). - sampling_frequency : float or None, default : None + sampling_frequency : float or None, default: None The sampling frequency in Hz (required if no ElectricalSeries is available). 
- unit_table_path : str or None, default : "units" + unit_table_path : str or None, default: "units" The path of the unit table in the NWB file. - samples_for_rate_estimation : int, default : 100000 + samples_for_rate_estimation : int, default: 100000 The number of timestamp samples to use to estimate the rate. Used if "rate" is not specified in the ElectricalSeries. - stream_mode : "fsspec" | "remfile" | "zarr" | None, default : None + stream_mode : "fsspec" | "remfile" | "zarr" | None, default: None The streaming mode to use. If None it assumes the file is on the local disk. - stream_cache_path : str or Path or None, default : None + stream_cache_path : str or Path or None, default: None Local path for caching. If None it uses the system temporary directory. - load_unit_properties : bool, default : True + load_unit_properties : bool, default: True If True, all the unit properties are loaded from the NWB file and stored as properties. - t_start : float or None, default : None + t_start : float or None, default: None This is the time at which the corresponding ElectricalSeries starts. NWB stores its spikes as times and the `t_start` is used to convert the times to seconds. Concretely, the returned frames are computed as: @@ -990,13 +990,13 @@ class NwbSortingExtractor(BaseSorting): When a `t_start` is not provided it will be inferred from the corresponding ElectricalSeries with name equal to `electrical_series_path`. The `t_start` then will be either the `ElectricalSeries.starting_time` or the first timestamp in the `ElectricalSeries.timestamps`. - cache : bool, default : False + cache : bool, default: False If True, the file is cached in the file passed to stream_cache_path. If False, the file is not cached. storage_options : dict | None = None, These are the additional kwargs (e.g. AWS credentials) that are passed to the zarr.open convenience function. This is only used on the "zarr" stream_mode. - use_pynwb : bool, default : False + use_pynwb : bool, default: False Uses the pynwb library to read the NWB file. Setting this to False, the default, uses h5py to read the file. Using h5py can improve performance by bypassing some of the PyNWB validations. @@ -1374,11 +1374,11 @@ def read_nwb(file_path, load_recording=True, load_sorting=False, electrical_seri ---------- file_path : str or Path Path to NWB file. - load_recording : bool, default : True + load_recording : bool, default: True If True, the recording object is loaded. - load_sorting : bool, default : False + load_sorting : bool, default: False If True, the sorting object is loaded. - electrical_series_path : str or None, default : None + electrical_series_path : str or None, default: None The name of the ElectricalSeries (if multiple ElectricalSeries are present) Returns diff --git a/src/spikeinterface/extractors/phykilosortextractors.py b/src/spikeinterface/extractors/phykilosortextractors.py index 3e923bacdb..7fdd77e703 100644 --- a/src/spikeinterface/extractors/phykilosortextractors.py +++ b/src/spikeinterface/extractors/phykilosortextractors.py @@ -16,13 +16,13 @@ class BasePhyKilosortSortingExtractor(BaseSorting): ---------- folder_path : str or Path Path to the output Phy folder (containing the params.py) - exclude_cluster_groups : list or str, default : None + exclude_cluster_groups : list or str, default: None Cluster groups to exclude (e.g. "noise" or ["noise", "mua"]). - keep_good_only : bool, default : True + keep_good_only : bool, default: True Whether to only keep good units.
- remove_empty_units : bool, default : True + remove_empty_units : bool, default: True If True, empty units are removed from the sorting extractor. - load_all_cluster_properties : bool, default : True + load_all_cluster_properties : bool, default: True If True, all cluster properties are loaded from the tsv/csv files. """ @@ -206,9 +206,9 @@ class PhySortingExtractor(BasePhyKilosortSortingExtractor): ---------- folder_path : str or Path Path to the output Phy folder (containing the params.py). - exclude_cluster_groups : list or str, default : None + exclude_cluster_groups : list or str, default: None Cluster groups to exclude (e.g. "noise" or ["noise", "mua"]). - load_all_cluster_properties : bool, default : True + load_all_cluster_properties : bool, default: True If True, all cluster properties are loaded from the tsv/csv files. Returns @@ -247,10 +247,10 @@ class KiloSortSortingExtractor(BasePhyKilosortSortingExtractor): ---------- folder_path : str or Path Path to the output Phy folder (containing the params.py). - keep_good_only : bool, default : True + keep_good_only : bool, default: True Whether to only keep good units. If True, only Kilosort-labeled 'good' units are returned. - remove_empty_units : bool, default : True + remove_empty_units : bool, default: True If True, empty units are removed from the sorting extractor. Returns diff --git a/src/spikeinterface/extractors/toy_example.py b/src/spikeinterface/extractors/toy_example.py index 6aaee141b9..450044d07b 100644 --- a/src/spikeinterface/extractors/toy_example.py +++ b/src/spikeinterface/extractors/toy_example.py @@ -43,25 +43,25 @@ def toy_example( Parameters ---------- - duration : float or list[float], default : 10 + duration : float or list[float], default: 10 Duration in seconds. If a list is provided, it will be the duration of each segment. - num_channels : int, default : 4 + num_channels : int, default: 4 Number of channels - num_units : int, default : 10 + num_units : int, default: 10 Number of units - sampling_frequency : float, default : 30000 + sampling_frequency : float, default: 30000 Sampling frequency - num_segments : int, default : 2 + num_segments : int, default: 2 Number of segments. - spike_times : np.array or list[nparray] or None, default : None + spike_times : np.array or list[np.array] or None, default: None Spike times in the recording - spike_labels : np.array or list[nparray] or None, default : None + spike_labels : np.array or list[np.array] or None, default: None Cluster label for each spike time (needs to be specified both together). # score_detection : int (between 0 and 1) # Generate the sorting based on a subset of spikes compared with the trace generation - firing_rate : float, default : 3.0 + firing_rate : float, default: 3.0 The firing rate for the units (in Hz) - seed : int or None, default : None + seed : int or None, default: None Seed for random initialization. Returns diff --git a/src/spikeinterface/generation/drift_tools.py b/src/spikeinterface/generation/drift_tools.py index 4ab1a24165..99e4f4d36e 100644 --- a/src/spikeinterface/generation/drift_tools.py +++ b/src/spikeinterface/generation/drift_tools.py @@ -80,10 +80,10 @@ def move_dense_templates(templates_array, displacements, source_probe, dest_prob shape : (num_displacement, 2) source_probe : Probe The Probe object on which templates_array is defined - dest_probe : Probe | None, default : None + dest_probe : Probe | None, default: None The destination Probe. Can have a different geometry than the original.
If None then the same probe is used. - interpolation_method : "cubic" | "linear", default : "cubic" + interpolation_method : "cubic" | "linear", default: "cubic" The interpolation method. Returns @@ -194,7 +194,7 @@ def make_linear_displacement(start, stop, num_step=10): The start position. stop : np.array of 2 elements The stop position. - num_step : int, default : 10 + num_step : int, default: 10 The number of steps between start and stop. Returns @@ -225,18 +225,18 @@ class InjectDriftingTemplatesRecording(BaseRecording): num_motions is generally = 1 but can be > 1 in case of combining several drift vectors displacement_sampling_frequency : float The sampling frequency of drift vector - displacement_unit_factor : numpy array or None, default : None + displacement_unit_factor : numpy array or None, default: None An array containing the factor per unit of the drift. This is used to create non-rigid drift with a factor gradient depending on unit positions. shape (num_units, num_motions) If None, then all units have the same factor (1) and the drift is rigid. - parent_recording : BaseRecording or None, default : None + parent_recording : BaseRecording or None, default: None The recording over which to add the templates. If None, will default to traces containing all 0. - num_samples : list[int] or int or None, default : None + num_samples : list[int] or int or None, default: None The number of samples in the recording per segment. You can use int for mono-segment objects. - amplitude_factor : list of numpy array or numpy array or float or None, default : None + amplitude_factor : list of numpy array or numpy array or float or None, default: None Controls the amplitude scaling for each spike for each unit. If None, no amplitude scaling is applied. If scalar all spikes have the same factor (certainly useless). diff --git a/src/spikeinterface/preprocessing/clip.py b/src/spikeinterface/preprocessing/clip.py index 40cd29e53d..78557c70d0 100644 --- a/src/spikeinterface/preprocessing/clip.py +++ b/src/spikeinterface/preprocessing/clip.py @@ -17,10 +17,10 @@ class ClipRecording(BasePreprocessor): ---------- recording : RecordingExtractor The recording extractor to be transformed - a_min : float or None, default : None + a_min : float or None, default: None Minimum value. If `None`, clipping is not performed on lower interval edge. - a_max : float or None, default : None + a_max : float or None, default: None Maximum value. If `None`, clipping is not performed on upper interval edge. @@ -61,22 +61,22 @@ class BlankSaturationRecording(BasePreprocessor): The recording extractor to be transformed - abs_threshold : float or None, default : None + abs_threshold : float or None, default: None The absolute value for considering that the signal is saturating - quantile_threshold : float or None, default : None + quantile_threshold : float or None, default: None The value in [0, 1] used if abs_threshold is None to automatically set the abs_threshold given the data. Must be provided if abs_threshold is None - direction : "upper" | "lower" | "both", default : "upper" + direction : "upper" | "lower" | "both", default: "upper" Only values higher than the detection threshold are set to fill_value ("upper"), or only values lower than the detection threshold ("lower"), or both ("both") - fill_value : float or None, default : None + fill_value : float or None, default: None The value to write instead of the saturating signal.
If None, then the value is automatically computed as the median signal value - num_chunks_per_segment : int, default : 50 + num_chunks_per_segment : int, default: 50 The number of chunks per segment to consider to estimate the threshold/fill_values - chunk_size : int, default : 500 + chunk_size : int, default: 500 The chunk size to estimate the threshold/fill_values - seed : int, default : 0 + seed : int, default: 0 The seed to select the random chunks Returns diff --git a/src/spikeinterface/preprocessing/common_reference.py b/src/spikeinterface/preprocessing/common_reference.py index 81bd737ada..bc8ecb4cb7 100644 --- a/src/spikeinterface/preprocessing/common_reference.py +++ b/src/spikeinterface/preprocessing/common_reference.py @@ -38,24 +38,24 @@ class CommonReferenceRecording(BasePreprocessor): ---------- recording : RecordingExtractor The recording extractor to be re-referenced - reference : "global" | "single" | "local", default : "global" + reference : "global" | "single" | "local", default: "global" If "global" the reference is the average or median across all the channels. If "single", the reference is a single channel or a list of channels that need to be set with the `ref_channel_ids`. If "local", the reference is the set of channels within an annulus that must be set with the `local_radius` parameter. - operator : "median" | "average", default : "median" + operator : "median" | "average", default: "median" If "median", a common median reference (CMR) is implemented (the median of the selected channels is removed for each timestamp). If "average", common average reference (CAR) is implemented (the mean of the selected channels is removed for each timestamp). - groups : list or None, default : None + groups : list or None, default: None List of lists containing the channel ids for splitting the reference. The CMR, CAR, or referencing with respect to single channels are applied group-wise. However, this is not applied for the local CAR. It is useful when dealing with different channel groups, e.g. multiple tetrodes. - ref_channel_ids : list or str or int, default : None + ref_channel_ids : list or str or int, default: None If no "groups" are specified, all channels are referenced to "ref_channel_ids". If "groups" is provided, then a list of channels to be applied to each group is expected. If "single" reference, a list of one channel or an int is expected. - local_radius : tuple(int, int), default : (30, 55) + local_radius : tuple(int, int), default: (30, 55) Used in the local CAR implementation as the selecting annulus, with the following format: `(exclude radius, include radius)` @@ -65,7 +65,7 @@ class CommonReferenceRecording(BasePreprocessor): include radius delineates the outer boundary of the annulus whose role is to exclude channels that are too far away. - dtype : None or dtype, default : None + dtype : None or dtype, default: None If None, the parent dtype is kept. Returns diff --git a/src/spikeinterface/preprocessing/correct_lsb.py b/src/spikeinterface/preprocessing/correct_lsb.py index 01c30cd5b0..a8d21b165f 100644 --- a/src/spikeinterface/preprocessing/correct_lsb.py +++ b/src/spikeinterface/preprocessing/correct_lsb.py @@ -16,13 +16,13 @@ def correct_lsb(recording, num_chunks_per_segment=20, chunk_size=10000, seed=Non ---------- recording : RecordingExtractor The recording extractor to be LSB-corrected.
- num_chunks_per_segment : int, default : 20 + num_chunks_per_segment : int, default: 20 Number of chunks per segment for random chunk extraction - chunk_size : int, default : 10000 + chunk_size : int, default: 10000 Size of a chunk in number of samples for random chunk extraction - seed : int or None, default : None + seed : int or None, default: None Random seed for random chunk - verbose : bool, default : False + verbose : bool, default: False If True, the estimated LSB value is printed Returns diff --git a/src/spikeinterface/preprocessing/depth_order.py b/src/spikeinterface/preprocessing/depth_order.py index 2fda7bbca0..9569459080 100644 --- a/src/spikeinterface/preprocessing/depth_order.py +++ b/src/spikeinterface/preprocessing/depth_order.py @@ -16,11 +16,11 @@ class DepthOrderRecording(ChannelSliceRecording): The recording to re-order. channel_ids : list/array or None If given, a subset of channels to order locations for - dimensions : str or tuple, list, default : ("x", "y") + dimensions : str, tuple, or list, default: ("x", "y") If str, it needs to be "x", "y", or "z". If tuple or list, it sorts the locations in two dimensions using lexsort. This approach is recommended since there is less ambiguity - flip : bool, default : False + flip : bool, default: False If flip is False then the order is bottom first (starting from tip of the probe). If flip is True then the order is upper first. """ diff --git a/src/spikeinterface/preprocessing/detect_bad_channels.py b/src/spikeinterface/preprocessing/detect_bad_channels.py index aa5bae946d..276a8ac0b4 100644 --- a/src/spikeinterface/preprocessing/detect_bad_channels.py +++ b/src/spikeinterface/preprocessing/detect_bad_channels.py @@ -53,47 +53,47 @@ def detect_bad_channels( ---------- recording : BaseRecording The recording for which bad channels are detected - method : "coeherence+psd" | "std" | "mad" | "neighborhood_r2", default : "coeherence+psd" + method : "coeherence+psd" | "std" | "mad" | "neighborhood_r2", default: "coeherence+psd" The method to be used for bad channel detection - std_mad_threshold : float, default : 5 + std_mad_threshold : float, default: 5 The standard deviation/mad multiplier threshold - psd_hf_threshold (coeherence+psd) : float, default : 0.02 + psd_hf_threshold (coeherence+psd) : float, default: 0.02 An absolute threshold (uV^2/Hz) used as a cutoff for noise channels. Channels with average power at >80% Nyquist larger than this threshold will be labeled as noise - dead_channel_threshold (coeherence+psd) : float, default : -0.5 + dead_channel_threshold (coeherence+psd) : float, default: -0.5 Threshold for channel coherence below which channels are labeled as dead - noisy_channel_threshold (coeherence+psd) : float, default : 1 + noisy_channel_threshold (coeherence+psd) : float, default: 1 Threshold for channel coherence above which channels are labeled as noisy (together with psd condition) - outside_channel_threshold (coeherence+psd) : float, default : -0.75 + outside_channel_threshold (coeherence+psd) : float, default: -0.75 Threshold for channel coherence above which channels at the edge of the recording are marked as outside of the brain - outside_channels_location (coeherence+psd) : "top" | "bottom" | "both", default : "top" + outside_channels_location (coeherence+psd) : "top" | "bottom" | "both", default: "top" Location of the outside channels. If "top", only the channels at the top of the probe can be marked as outside channels. If "bottom", only the channels at the bottom of the probe can be marked as outside channels.
If "both", both the channels at the top and bottom of the probe can be marked as outside channels - n_neighbors (coeherence+psd) : int, default : 11 + n_neighbors (coeherence+psd) : int, default: 11 Number of channel neighbors to compute median filter (needs to be odd) - nyquist_threshold (coeherence+psd) : float, default : 0.8 + nyquist_threshold (coeherence+psd) : float, default: 0.8 Frequency with respect to Nyquist (Fn=1) above which the mean of the PSD is calculated and compared with psd_hf_threshold - direction (coeherence+psd) : "x" | "y" | "z", default : "y" + direction (coeherence+psd) : "x" | "y" | "z", default: "y" The depth dimension - highpass_filter_cutoff : float, default : 300 + highpass_filter_cutoff : float, default: 300 If the recording is not filtered, the cutoff frequency of the highpass filter - chunk_duration_s : float, default : 0.5 + chunk_duration_s : float, default: 0.5 Duration of each chunk - num_random_chunks : int, default : 100 + num_random_chunks : int, default: 100 Number of random chunks Having many chunks is important for reproducibility. - welch_window_ms : float, default : 10 + welch_window_ms : float, default: 10 Window size for the scipy.signal.welch that will be converted to nperseg - neighborhood_r2_threshold : float, default : 0.95 + neighborhood_r2_threshold : float, default: 0.95 R^2 threshold for the neighborhood_r2 method. - neighborhood_r2_radius_um : float, default : 30 + neighborhood_r2_radius_um : float, default: 30 Spatial radius below which two channels are considered neighbors in the neighborhood_r2 method. - seed : int or None, default : None + seed : int or None, default: None The random seed to extract chunks Returns @@ -298,19 +298,19 @@ def detect_bad_channels_ibl( psd_hf_threshold : float Threshold for high frequency PSD. If mean PSD above `nyquist_threshold` * fn is greater than this value, channels are flagged as noisy (together with channel coherence condition). - dead_channel_thr : float, default : -0.5 + dead_channel_thr : float, default: -0.5 Threshold for channel coherence below which channels are labeled as dead - noisy_channel_thr : float, default : 1 + noisy_channel_thr : float, default: 1 Threshold for channel coherence above which channels are labeled as noisy (together with psd condition) - outside_channel_thr : float, default : -0.75 + outside_channel_thr : float, default: -0.75 Threshold for channel coherence above which channels - n_neighbors : int, default : 11 + n_neighbors : int, default: 11 Number of neighbors to compute median fitler - nyquist_threshold : float, default : 0.8 + nyquist_threshold : float, default: 0.8 Threshold on Nyquist frequency to calculate HF noise band - welch_window_ms : float, default : 0.3 + welch_window_ms : float, default: 0.3 Window size for the scipy.signal.welch that will be converted to nperseg - outside_channels_location : "top" | "bottom" | "both", default : "top" + outside_channels_location : "top" | "bottom" | "both", default: "top" Location of the outside channels. If "top", only the channels at the top of the probe can be marked as outside channels. If "bottom", only the channels at the bottom of the probe can be marked as outside channels. 
If "both", both the channels at the top and bottom of the probe can be diff --git a/src/spikeinterface/preprocessing/filter.py b/src/spikeinterface/preprocessing/filter.py index 24b7fb0d3f..3f1a155d0d 100644 --- a/src/spikeinterface/preprocessing/filter.py +++ b/src/spikeinterface/preprocessing/filter.py @@ -32,20 +32,20 @@ class FilterRecording(BasePreprocessor): ---------- recording : Recording The recording extractor to be re-referenced - band : float or list, default : [300.0, 6000.0] + band : float or list, default: [300.0, 6000.0] If float, cutoff frequency in Hz for "highpass" filter type If list. band (low, high) in Hz for "bandpass" filter type - btype : "bandpass" | "highpass", default : "bandpass" + btype : "bandpass" | "highpass", default: "bandpass" Type of the filter - margin_ms : float, default : 5.0 + margin_ms : float, default: 5.0 Margin in ms on border to avoid border effect - filter_mode : "sos" | "ba", default : "sos" + filter_mode : "sos" | "ba", default: "sos" Filter form of the filter coefficients: - second-order sections ("sos") - numerator/denominator : ("ba") - coef : array or None, default : None + coef : array or None, default: None Filter coefficients in the filter_mode form. - dtype : dtype or None, default : None + dtype : dtype or None, default: None The dtype of the returned traces. If None, the dtype of the parent recording is used {} diff --git a/src/spikeinterface/preprocessing/filter_gaussian.py b/src/spikeinterface/preprocessing/filter_gaussian.py index cc8efa7d89..1db7d45bd8 100644 --- a/src/spikeinterface/preprocessing/filter_gaussian.py +++ b/src/spikeinterface/preprocessing/filter_gaussian.py @@ -31,7 +31,7 @@ class GaussianFilterRecording(BasePreprocessor): freq_max : float or None The higher frequency cutoff for the bandpass filter. If None, the resulting object is a highpass filter. - margin_sd : float, default : 5.0 + margin_sd : float, default: 5.0 The number of standard deviation to take for margins. Returns diff --git a/src/spikeinterface/preprocessing/interpolate_bad_channels.py b/src/spikeinterface/preprocessing/interpolate_bad_channels.py index 1a1edfb917..d60c9b27dd 100644 --- a/src/spikeinterface/preprocessing/interpolate_bad_channels.py +++ b/src/spikeinterface/preprocessing/interpolate_bad_channels.py @@ -25,13 +25,13 @@ class InterpolateBadChannelsRecording(BasePreprocessor): The parent recording bad_channel_ids : list or 1d np.array Channel ids of the bad channels to interpolate. - sigma_um : float or None, default : None + sigma_um : float or None, default: None Distance between sequential channels in um. If None, will use the most common distance between y-axis channels - p : float, default : 1.3 + p : float, default: 1.3 Exponent of the Gaussian kernel. Determines rate of decay for distance weightings - weights : np.array or None, default : None + weights : np.array or None, default: None The weights to give to bad_channel_ids at interpolation. 
If None, weights are automatically computed diff --git a/src/spikeinterface/preprocessing/motion.py b/src/spikeinterface/preprocessing/motion.py index 908c350847..86fd2b1c62 100644 --- a/src/spikeinterface/preprocessing/motion.py +++ b/src/spikeinterface/preprocessing/motion.py @@ -250,11 +250,11 @@ def correct_motion( ---------- recording : RecordingExtractor The recording extractor to be transformed - preset : str, default : "nonrigid_accurate" + preset : str, default: "nonrigid_accurate" The preset name - folder : Path str or None, default : None + folder : Path, str, or None, default: None If not None, then intermediate motion info is saved into a folder - output_motion_info : bool, default : False + output_motion_info : bool, default: False If True, then the function returns a `motion_info` dictionary that contains variables to check intermediate steps (motion_histogram, non_rigid_windows, pairwise_displacement). This dictionary is the same when reloaded from the folder diff --git a/src/spikeinterface/preprocessing/normalize_scale.py b/src/spikeinterface/preprocessing/normalize_scale.py index bc2f4224d4..44b9ac9937 100644 --- a/src/spikeinterface/preprocessing/normalize_scale.py +++ b/src/spikeinterface/preprocessing/normalize_scale.py @@ -48,17 +48,17 @@ class NormalizeByQuantileRecording(BasePreprocessor): ---------- recording : RecordingExtractor The recording extractor to be transformed - scale : float, default : 1.0 + scale : float, default: 1.0 Scale for the output distribution - median : float, default : 0.0 + median : float, default: 0.0 Median for the output distribution - q1 : float, default : 0.01 + q1 : float, default: 0.01 Lower quantile used for measuring the scale - q1 : float, default : 0.99 + q2 : float, default: 0.99 Upper quantile used for measuring the scale - mode : "by_channel" | "pool_channel", default : "by_channel" + mode : "by_channel" | "pool_channel", default: "by_channel" If "by_channel" each channel is rescaled independently. - dtype : str or np.dtype, default : "float32" + dtype : str or np.dtype, default: "float32" The dtype of the output traces **random_chunk_kwargs : Keyword arguments for `spikeinterface.core.get_random_data_chunk()` function @@ -136,7 +136,7 @@ class ScaleRecording(BasePreprocessor): Scalar for the traces of the recording extractor or array with scalars for each channel offset : float or array Offset for the traces of the recording extractor or array with offsets for each channel - dtype : str or np.dtype, default : "float32" + dtype : str or np.dtype, default: "float32" The dtype of the output traces Returns @@ -192,9 +192,9 @@ class CenterRecording(BasePreprocessor): ---------- recording : RecordingExtractor The recording extractor to be centered - mode : "median" | "mean", default : "median" + mode : "median" | "mean", default: "median" The method used to center the traces - dtype : str or np.dtype, default : "float32" + dtype : str or np.dtype, default: "float32" The dtype of the output traces **random_chunk_kwargs : Keyword arguments for `spikeinterface.core.get_random_data_chunk()` function @@ -240,7 +240,7 @@ class ZScoreRecording(BasePreprocessor): ---------- recording : RecordingExtractor The recording extractor to be z-scored - mode : "median+mad" | "mean+std", default : "median+mad" + mode : "median+mad" | "mean+std", default: "median+mad" The mode to compute the zscore dtype : None or dtype If None, the parent dtype is kept.
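# Illustrative aside: a minimal sketch of the scaling preprocessors documented
# above, assuming an existing `recording` object; `center` and `zscore` are the
# function-style wrappers for the CenterRecording and ZScoreRecording classes.
# from spikeinterface.preprocessing import center, zscore
# rec_centered = center(recording, mode="median")      # remove per-channel median
# rec_zscored = zscore(recording, mode="median+mad")   # robust per-channel z-score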
diff --git a/src/spikeinterface/preprocessing/phase_shift.py b/src/spikeinterface/preprocessing/phase_shift.py
index 9708a04749..41c18e2f38 100644
--- a/src/spikeinterface/preprocessing/phase_shift.py
+++ b/src/spikeinterface/preprocessing/phase_shift.py
@@ -25,10 +25,10 @@ class PhaseShiftRecording(BasePreprocessor):
----------
recording : Recording
The recording. It needs to have "inter_sample_shift" in properties.
- margin_ms : float, default : 40.0
+ margin_ms : float, default: 40.0
Margin in ms for computation.
40 ms ensures a very small error when doing chunk processing
- inter_sample_shift : None or numpy array, default : None
+ inter_sample_shift : None or numpy array, default: None
If "inter_sample_shift" is not in recording properties,
we can externally provide one.

diff --git a/src/spikeinterface/preprocessing/remove_artifacts.py b/src/spikeinterface/preprocessing/remove_artifacts.py
index 892bfc090e..3c0f766737 100644
--- a/src/spikeinterface/preprocessing/remove_artifacts.py
+++ b/src/spikeinterface/preprocessing/remove_artifacts.py
@@ -26,17 +26,17 @@ class RemoveArtifactsRecording(BasePreprocessor):
The recording extractor to remove artifacts from
list_triggers : list of lists/arrays
One list per segment of int with the stimulation trigger frames
- ms_before : float or None, default : 0.5
+ ms_before : float or None, default: 0.5
Time interval in ms to remove before the trigger events.
If None, then also ms_after must be None and a single sample is removed
- ms_after : float or None, default : 3.0
+ ms_after : float or None, default: 3.0
Time interval in ms to remove after the trigger events.
If None, then also ms_before must be None and a single sample is removed
list_labels : list of lists/arrays or None
One list per segment of labels with the stimulation labels for the given artifacts.
labels should be strings, for JSON serialization.
Required for "median" and "average" modes.
- mode : "zeros", "linear", "cubic", "average", "median", default : "zeros"
+ mode : "zeros", "linear", "cubic", "average", "median", default: "zeros"
Determines what artifacts are replaced by. Can be one of the following:

- "zeros": Artifacts are replaced by zeros.
@@ -63,23 +63,23 @@ class RemoveArtifactsRecording(BasePreprocessor):
continuation of the trace. If the trace starts or ends with an artifact,
the gap is filled with the closest available value before or after the artifact.
- fit_sample_spacing : float, default : 1.0
+ fit_sample_spacing : float, default: 1.0
Determines the spacing (in ms) of reference points for the cubic spline
fit if mode = "cubic". Note : The actual fit samples are the median of the 5 data points
around the time of each sample point to avoid excessive influence from hyper-local fluctuations.
- artifacts : dict or None, default : None
+ artifacts : dict or None, default: None
If provided (when mode is "median" or "average") then it must be a dict with
keys that are the labels of the artifacts, and values the artifacts themselves,
on all channels (and thus bypassing ms_before and ms_after)
- sparsity : dict or None, default : None
+ sparsity : dict or None, default: None
If provided (when mode is "median" or "average") then it must be a dict with
keys that are the labels of the artifacts, and values that are boolean masks of
the channels where the artifacts should be considered (for subtraction/scaling)
- scale_amplitude : False, default : False
+ scale_amplitude : bool, default: False
If True, then for mode "median" or "average" the amplitude of the template
will be scaled in amplitude at each time occurrence to minimize residuals
- time_jitter : float, default : 0
+ time_jitter : float, default: 0
If non-zero, then for mode "median" or "average", a time jitter in ms
can be allowed to minimize the residuals
waveforms_kwargs : None
diff --git a/src/spikeinterface/preprocessing/resample.py b/src/spikeinterface/preprocessing/resample.py
index 83a2c3577a..cc110118a5 100644
--- a/src/spikeinterface/preprocessing/resample.py
+++ b/src/spikeinterface/preprocessing/resample.py
@@ -28,11 +28,11 @@ class ResampleRecording(BasePreprocessor):
The recording extractor to be resampled
resample_rate : int
The resampling frequency
- margin : float, default : 100.0
+ margin : float, default: 100.0
Margin in ms for computations, will be used to decrease edge effects.
- dtype : dtype or None, default : None
+ dtype : dtype or None, default: None
The dtype of the returned traces. If None, the dtype of the parent recording is used.
- skip_checks : bool, default : False
+ skip_checks : bool, default: False
If True, checks on sampling frequencies and cutoff filter frequencies are skipped

Returns
diff --git a/src/spikeinterface/preprocessing/silence_periods.py b/src/spikeinterface/preprocessing/silence_periods.py
index ed0a0c9d28..5f70bfbb40 100644
--- a/src/spikeinterface/preprocessing/silence_periods.py
+++ b/src/spikeinterface/preprocessing/silence_periods.py
@@ -26,7 +26,7 @@ class SilencedPeriodsRecording(BasePreprocessor):
noise_levels : array
Noise levels if already computed

- mode : "zeros" | "noise, default : "zeros"
+ mode : "zeros" | "noise", default: "zeros"
Determines what the silenced periods are replaced by. Can be one of the following:

- "zeros": Periods are replaced by zeros.
diff --git a/src/spikeinterface/preprocessing/unsigned_to_signed.py b/src/spikeinterface/preprocessing/unsigned_to_signed.py
index 32dada11de..b221fd7bed 100644
--- a/src/spikeinterface/preprocessing/unsigned_to_signed.py
+++ b/src/spikeinterface/preprocessing/unsigned_to_signed.py
@@ -14,7 +14,7 @@ class UnsignedToSignedRecording(BasePreprocessor):
----------
recording : Recording
The recording to be signed.
- bit_depth : int or None, default : None
+ bit_depth : int or None, default: None
In case the bit depth of the ADC does not match that of the data type,
it specifies the bit depth of the ADC to estimate the offset.
For example, a `bit_depth` of 12 will correct for an offset of `2**11`
diff --git a/src/spikeinterface/preprocessing/whiten.py b/src/spikeinterface/preprocessing/whiten.py
index 24da3c7304..874d4304e3 100644
--- a/src/spikeinterface/preprocessing/whiten.py
+++ b/src/spikeinterface/preprocessing/whiten.py
@@ -17,27 +17,27 @@ class WhitenRecording(BasePreprocessor):
----------
recording : RecordingExtractor
The recording extractor to be whitened.
- dtype : None or dtype, default : None
+ dtype : None or dtype, default: None
If None the parent dtype is kept.
For integer dtype an int_scale must also be given.
- mode : "global" | "local", default : "global"
+ mode : "global" | "local", default: "global"
"global" uses the entire covariance matrix to compute the W matrix
"local" uses local covariance (by radius) to compute the W matrix
- radius_um : None or float, default : None
+ radius_um : None or float, default: None
Used for mode = "local" to get the neighborhood
- apply_mean : bool, default : False
+ apply_mean : bool, default: False
Subtract or not the mean matrix M before the dot product with W.
- int_scale : None or float, default : None
+ int_scale : None or float, default: None
Apply a scaling factor to fit the integer range.
This is used when the dtype is an integer, so that the output is scaled.
For example, a value of `int_scale=200` will scale the traces value to a standard deviation of 200.
- eps : float or None, default : None
+ eps : float or None, default: None
Small epsilon to regularize SVD. If None, eps is defaulted to 1e-8. If the data is float type and scaled
down to very small values, then the eps is automatically set to a small fraction (1e-3) of the median of the squared data.
- W : 2d np.array or None, default : None
+ W : 2d np.array or None, default: None
Pre-computed whitening matrix
- M : 1d np.array or None, default : None
+ M : 1d np.array or None, default: None
Pre-computed means. M can be None when previously computed with apply_mean=False
**random_chunk_kwargs : Keyword arguments for `spikeinterface.core.get_random_data_chunk()` function
@@ -147,9 +147,9 @@ def compute_whitening_matrix(recording, mode, random_chunk_kwargs, apply_mean, r
Keyword arguments for get_random_data_chunks()
apply_mean : bool
If True, the mean is removed prior to computing the covariance
- radius_um : float or None, default : None
+ radius_um : float or None, default: None
Used for mode = "local" to get the neighborhood
- eps : float or None, default : None
+ eps : float or None, default: None
Small epsilon to regularize SVD. If None, the default is set to 1e-8, but if the data is float type
and scaled down to very small values, eps is automatically set to a small fraction (1e-3) of the median of the squared data.
diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py
index d72e7abff2..c7127226b0 100644
--- a/src/spikeinterface/sorters/launcher.py
+++ b/src/spikeinterface/sorters/launcher.py
@@ -235,20 +235,20 @@ def run_sorter_by_property(
Property to split by before sorting
folder : str | Path
The working directory.
- mode_if_folder_exists : bool or None, default : None
+ mode_if_folder_exists : bool or None, default: None
Must be None. This is deprecated.
If not None then a warning is raised. Will be removed in the next release.
- engine : "loop" | "joblib" | "dask", default : "loop"
+ engine : "loop" | "joblib" | "dask", default: "loop"
Which engine to use to run sorter.
engine_kwargs : dict
This contains kwargs specific to the launcher engine:
* "loop" : no kwargs
* "joblib" : {"n_jobs" : } number of processes
* "dask" : {"client":} the dask client for submitting tasks
- verbose : bool, default : False
+ verbose : bool, default: False
Controls sorter verboseness
- docker_image : None or str, default : None
+ docker_image : None or str, default: None
If str, run the sorter inside a container (docker) using the docker package
**sorter_params : keyword args
Spike sorter specific arguments (they can be retrieved with `get_default_sorter_params(sorter_name_or_class)`)
diff --git a/src/spikeinterface/sorters/runsorter.py b/src/spikeinterface/sorters/runsorter.py
index eb5fbe10e5..b1e9ea9308 100644
--- a/src/spikeinterface/sorters/runsorter.py
+++ b/src/spikeinterface/sorters/runsorter.py
@@ -68,28 +68,28 @@
Path to output folder
remove_existing_folder : bool
If True and folder exists then delete.
- delete_output_folder : bool, default : False
+ delete_output_folder : bool, default: False
If True, output folder is deleted
- verbose : bool, default : False
+ verbose : bool, default: False
If True, output is verbose
- raise_error : bool, default : True
+ raise_error : bool, default: True
If True, an error is raised if spike sorting fails
If False, the process continues and the error is logged in the log file.
- docker_image : bool or str, default : False
+ docker_image : bool or str, default: False
If True, pull the default docker container for the sorter and run the sorter in that container using docker.
Use a str to specify a non-default container. If that container is not local it will be pulled from Docker Hub.
If False, the sorter is run locally
- singularity_image : bool or str, default : False
+ singularity_image : bool or str, default: False
If True, pull the default docker container for the sorter and run the sorter in that container using
singularity. Use a str to specify a non-default container. If that container is not local it will be pulled
from Docker Hub. If False, the sorter is run locally
- with_output : bool, default : True
+ with_output : bool, default: True
If True, the output Sorting is returned as a Sorting
- delete_container_files : bool, default : True
+ delete_container_files : bool, default: True
If True, the container temporary files are deleted after the sorting is done
- extra_requirements : list, default : None
+ extra_requirements : list, default: None
List of extra requirements to install in the container
- installation_mode : "auto" | "pypi" | "github" | "folder" | "dev" | "no-install", default : "auto"
+ installation_mode : "auto" | "pypi" | "github" | "folder" | "dev" | "no-install", default: "auto"
How spikeinterface is installed in the container:
* "auto" : if host installation is a pip release then use "github" with tag
if host installation is DEV_MODE=True then use "dev"
@@ -100,11 +100,11 @@
cross checks
* "dev" : same as "folder", but the folder is the spikeinterface.__file__ to ensure same version as host
* "no-install" : do not install spikeinterface in the container because it is already installed
- spikeinterface_version : str, default : None
+ spikeinterface_version : str, default: None
The spikeinterface version to install in the container.
If None, the current version is used
- spikeinterface_folder_source : Path or None, default : None
+ spikeinterface_folder_source : Path or None, default: None
In case of installation_mode="folder", the spikeinterface folder source to use to install in the container
- output_folder : None, default : None
+ output_folder : None, default: None
Do not use. Deprecated output function to be removed in 0.103.
**sorter_params : keyword args
Spike sorter specific arguments (they can be retrieved with `get_default_sorter_params(sorter_name_or_class)`)
@@ -211,18 +211,18 @@ def run_sorter_local(
The recording extractor to be spike sorted
folder : str or Path
Path to output folder. If None, a folder is created in the current directory
- remove_existing_folder : bool, default : True
+ remove_existing_folder : bool, default: True
If True and output_folder already exists then it is deleted
- delete_output_folder : bool, default : False
+ delete_output_folder : bool, default: False
If True, output folder is deleted
- verbose : bool, default : False
+ verbose : bool, default: False
If True, output is verbose
- raise_error : bool, default : True
+ raise_error : bool, default: True
If True, an error is raised if spike sorting fails.
If False, the process continues and the error is logged in the log file
- with_output : bool, default : True
+ with_output : bool, default: True
If True, the output Sorting is returned as a Sorting
- output_folder : None, default : None
+ output_folder : None, default: None
Do not use. Deprecated output function to be removed in 0.103.
**sorter_params : keyword args
"""
@@ -292,25 +292,25 @@ def run_sorter_container(
The recording extractor to be spike sorted
mode : str
The container mode : "docker" or "singularity"
- container_image : str, default : None
+ container_image : str, default: None
The container image name and tag.
If None, the default container image is used
- output_folder : str, default : None
+ output_folder : str, default: None
Path to output folder
- remove_existing_folder : bool, default : True
+ remove_existing_folder : bool, default: True
If True and output_folder already exists then it is deleted
- delete_output_folder : bool, default : False
+ delete_output_folder : bool, default: False
If True, output folder is deleted
- verbose : bool, default : False
+ verbose : bool, default: False
If True, output is verbose
- raise_error : bool, default : True
+ raise_error : bool, default: True
If True, an error is raised if spike sorting fails
- with_output : bool, default : True
+ with_output : bool, default: True
If True, the output Sorting is returned as a Sorting
- delete_container_files : bool, default : True
+ delete_container_files : bool, default: True
If True, the container temporary files are deleted after the sorting is done
- extra_requirements : list, default : None
+ extra_requirements : list, default: None
List of extra requirements to install in the container
- installation_mode : "auto" | "pypi" | "github" | "folder" | "dev" | "no-install", default : "auto"
+ installation_mode : "auto" | "pypi" | "github" | "folder" | "dev" | "no-install", default: "auto"
How spikeinterface is installed in the container:
* "auto" : if host installation is a pip release then use "github" with tag
if host installation is DEV_MODE=True then use "dev"
@@ -321,9 +321,9 @@ def run_sorter_container(
cross checks
* "dev" : same as "folder", but the folder is the spikeinterface.__file__ to ensure same version as host
* "no-install" : do not install spikeinterface in the container because it is already installed
- spikeinterface_version : str, default : None
+ spikeinterface_version : str, default: None
The spikeinterface version to install in the container. If None, the current version is used
- spikeinterface_folder_source : Path or None, default : None
+ spikeinterface_folder_source : Path or None, default: None
In case of installation_mode="folder", the spikeinterface folder source to use to install in the container
**sorter_params : keyword args for the sorter
@@ -648,9 +648,9 @@ def read_sorter_folder(folder, register_recording=True, sorting_info=True, raise
----------
folder : Path or str
The sorter folder
- register_recording : bool, default : True
+ register_recording : bool, default: True
Attach recording (when json or pickle) to the sorting
- sorting_info : bool, default : True
+ sorting_info : bool, default: True
Attach sorting info to the sorting.
"""
folder = Path(folder)
diff --git a/src/spikeinterface/widgets/amplitudes.py b/src/spikeinterface/widgets/amplitudes.py
index 343bf30cf9..efbf6f3f32 100644
--- a/src/spikeinterface/widgets/amplitudes.py
+++ b/src/spikeinterface/widgets/amplitudes.py
@@ -17,22 +17,22 @@ class AmplitudesWidget(BaseWidget):
----------
sorting_analyzer : SortingAnalyzer
The input SortingAnalyzer
- unit_ids : list or None, default : None
+ unit_ids : list or None, default: None
List of unit ids
- segment_index : int or None, default : None
+ segment_index : int or None, default: None
The segment index (or None if mono-segment)
- max_spikes_per_unit : int or None, default : None
+ max_spikes_per_unit : int or None, default: None
Number of max spikes per unit to display.
Use None for all spikes
- hide_unit_selector : bool, default : False
+ hide_unit_selector : bool, default: False
If True the unit selector is not displayed
(sortingview backend)
- plot_histogram : bool, default : False
+ plot_histogram : bool, default: False
If True, a histogram of the amplitudes is plotted on the right axis
(matplotlib backend)
- bins : int or None, default : None
+ bins : int or None, default: None
If plot_histogram is True, the number of bins for the amplitude histogram.
If None this is automatically adjusted
- plot_legend : bool, default : True
+ plot_legend : bool, default: True
True includes legend in plot
"""
diff --git a/src/spikeinterface/widgets/comparison.py b/src/spikeinterface/widgets/comparison.py
index 9650b35505..f4dfe6e34d 100644
--- a/src/spikeinterface/widgets/comparison.py
+++ b/src/spikeinterface/widgets/comparison.py
@@ -88,12 +88,12 @@ class AgreementMatrixWidget(BaseWidget):
sorting_comparison : GroundTruthComparison or SymmetricSortingComparison
The sorting comparison object.
Can optionally be symmetric if given a SymmetricSortingComparison
- ordered : bool, default : True
+ ordered : bool, default: True
Order units with best agreement scores.
If True, agreement scores can be seen along a diagonal
- count_text : bool, default : True
+ count_text : bool, default: True
If True counts are displayed as text
- unit_ticks : bool, default : True
+ unit_ticks : bool, default: True
If True unit tick labels are displayed
"""
diff --git a/src/spikeinterface/widgets/crosscorrelograms.py b/src/spikeinterface/widgets/crosscorrelograms.py
index 2fcdfcd509..cdb2041aa3 100644
--- a/src/spikeinterface/widgets/crosscorrelograms.py
+++ b/src/spikeinterface/widgets/crosscorrelograms.py
@@ -17,20 +17,20 @@ class CrossCorrelogramsWidget(BaseWidget):
----------
sorting_analyzer_or_sorting : SortingAnalyzer or BaseSorting
The object to compute/get crosscorrelograms from
- unit_ids list or None, default : None
+ unit_ids : list or None, default: None
List of unit ids
- min_similarity_for_correlograms : float, default : 0.2
+ min_similarity_for_correlograms : float, default: 0.2
For sortingview backend. Threshold for computing pair-wise cross-correlograms.
If template similarity between two units is below this threshold, the cross-correlogram is not displayed
- window_ms : float, default : 100.0
+ window_ms : float, default: 100.0
Window for CCGs in ms. If correlograms are already computed (e.g. with SortingAnalyzer),
this argument is ignored
- bin_ms : float, default : 1.0
+ bin_ms : float, default: 1.0
Bin size in ms. If correlograms are already computed (e.g. with SortingAnalyzer),
this argument is ignored
- hide_unit_selector : bool, default : False
+ hide_unit_selector : bool, default: False
For sortingview backend, if True the unit selector is not displayed
- unit_colors : dict or None, default : None
+ unit_colors : dict or None, default: None
If given, a dictionary with unit ids as keys and colors as values
"""
diff --git a/src/spikeinterface/widgets/gtstudy.py b/src/spikeinterface/widgets/gtstudy.py
index c26417a9fa..a2c366851b 100644
--- a/src/spikeinterface/widgets/gtstudy.py
+++ b/src/spikeinterface/widgets/gtstudy.py
@@ -135,13 +135,13 @@ class StudyPerformances(BaseWidget):
----------
study : GroundTruthStudy
A study object.
- mode : "ordered" | "snr" | "swarm", default : "ordered"
+ mode : "ordered" | "snr" | "swarm", default: "ordered"
Which plot mode to use:
* "ordered": plot performance metrics vs unit indices ordered by decreasing accuracy
* "snr": plot performance metrics vs snr
* "swarm": plot performance metrics as a swarm plot (see seaborn.swarmplot for details)
- performance_names : list or tuple, default : ("accuracy", "precision", "recall")
+ performance_names : list or tuple, default: ("accuracy", "precision", "recall")
Which performances to plot ("accuracy", "precision", "recall")
case_keys : list or None
A selection of cases to plot, if None, then all.
@@ -309,7 +309,7 @@ class StudySummary(BaseWidget):
----------
study : GroundTruthStudy
A study object.
- case_keys : list or None, default : None
+ case_keys : list or None, default: None
A selection of cases to plot, if None, then all.
"""
diff --git a/src/spikeinterface/widgets/motion.py b/src/spikeinterface/widgets/motion.py
index 49f4ff4e94..110555dd6a 100644
--- a/src/spikeinterface/widgets/motion.py
+++ b/src/spikeinterface/widgets/motion.py
@@ -13,23 +13,23 @@ class MotionWidget(BaseWidget):
----------
motion_info : dict
The motion info returned by correct_motion() or loaded back with load_motion_info()
- recording : RecordingExtractor, default : None
+ recording : RecordingExtractor, default: None
The recording extractor object (only used to get "real" times)
- sampling_frequency : float, default : None
+ sampling_frequency : float, default: None
The sampling frequency (needed if recording is None)
- depth_lim : tuple or None, default : None
+ depth_lim : tuple or None, default: None
The min and max depth to display, if None (min and max of the recording)
- motion_lim : tuple or None, default : None
+ motion_lim : tuple or None, default: None
The min and max motion to display, if None (min and max of the motion)
- color_amplitude : bool, default : False
+ color_amplitude : bool, default: False
If True, the color of the scatter points is the amplitude of the peaks
- scatter_decimate : int, default : None
+ scatter_decimate : int, default: None
If > 1, the scatter points are decimated
- amplitude_cmap : str, default : "inferno"
+ amplitude_cmap : str, default: "inferno"
The colormap to use for the amplitude
- amplitude_clim : tuple or None, default : None
+ amplitude_clim : tuple or None, default: None
The min and max amplitude to display, if None (min and max of the amplitudes)
- amplitude_alpha : float, default : 1
+ amplitude_alpha : float, default: 1
The alpha of the scatter points
"""
diff --git a/src/spikeinterface/widgets/multicomparison.py b/src/spikeinterface/widgets/multicomparison.py
index a86bc58d50..c3c3681240 100644
--- a/src/spikeinterface/widgets/multicomparison.py
+++ b/src/spikeinterface/widgets/multicomparison.py
@@ -15,15 +15,15 @@ class MultiCompGraphWidget(BaseWidget):
----------
multi_comparison : BaseMultiComparison
The multi comparison object
- draw_labels : bool, default : False
+ draw_labels : bool, default: False
If True unit labels are shown
- node_cmap : matplotlib colormap, default : "viridis"
+ node_cmap : matplotlib colormap, default: "viridis"
The colormap to be used for the nodes
- edge_cmap : matplotlib colormap, default : "hot"
+ edge_cmap : matplotlib colormap, default: "hot"
The colormap to be used for the edges
- alpha_edges : float, default : 0.5
+ alpha_edges : float, default: 0.5
Alpha value for edges
- colorbar : bool, default : False
+ colorbar : bool, default: False
If True a colorbar for the edges
is plotted
"""
@@ -121,13 +121,13 @@ class MultiCompGlobalAgreementWidget(BaseWidget):
----------
multi_comparison : BaseMultiComparison
The multi comparison object
- plot_type : "pie" | "bar", default : "pie"
+ plot_type : "pie" | "bar", default: "pie"
The plot type
- cmap : matplotlib colormap, default : "YlOrRd"
+ cmap : matplotlib colormap, default: "YlOrRd"
The colormap to be used for the nodes
- fontsize : int, default : 9
+ fontsize : int, default: 9
The text fontsize
- show_legend : bool, default : True
+ show_legend : bool, default: True
If True a legend is shown
"""
@@ -199,11 +199,11 @@ class MultiCompAgreementBySorterWidget(BaseWidget):
----------
multi_comparison : BaseMultiComparison
The multi comparison object
- plot_type : "pie" | "bar", default : "pie
+ plot_type : "pie" | "bar", default: "pie"
The plot type
- cmap : matplotlib colormap, default : "Reds"
+ cmap : matplotlib colormap, default: "Reds"
The colormap to be used for the nodes
- fontsize : int, default : 9
+ fontsize : int, default: 9
The text fontsize
show_legend : bool
Show the legend in the last axes
diff --git a/src/spikeinterface/widgets/peak_activity.py b/src/spikeinterface/widgets/peak_activity.py
index 0121e40cf3..f611927813 100644
--- a/src/spikeinterface/widgets/peak_activity.py
+++ b/src/spikeinterface/widgets/peak_activity.py
@@ -19,16 +19,16 @@ class PeakActivityMapWidget(BaseWidget):
peaks : None or numpy array
Optionally can give already detected peaks
to avoid multiple computation.
- detect_peaks_kwargs : None or dict, default : None
+ detect_peaks_kwargs : None or dict, default: None
If peaks is None, the kwargs for the detect_peaks function.
- bin_duration_s : None or float, default : None
+ bin_duration_s : None or float, default: None
If None then a static image is shown
If not None then it is an animation per bin.
- with_contact_color : bool, default : True + with_contact_color : bool, default: True Plot rates with contact colors - with_interpolated_map : bool, default : True + with_interpolated_map : bool, default: True Plot rates with interpolated map - with_channel_ids : bool, default : False + with_channel_ids : bool, default: False Add channel ids text on the probe diff --git a/src/spikeinterface/widgets/quality_metrics.py b/src/spikeinterface/widgets/quality_metrics.py index 8d7a256531..d2625451c8 100644 --- a/src/spikeinterface/widgets/quality_metrics.py +++ b/src/spikeinterface/widgets/quality_metrics.py @@ -12,15 +12,15 @@ class QualityMetricsWidget(MetricsBaseWidget): ---------- sorting_analyzer : SortingAnalyzer The object to get quality metrics from - unit_ids : list or None, default : None + unit_ids : list or None, default: None List of unit ids - include_metrics : list or None, default : None + include_metrics : list or None, default: None If given, a list of quality metrics to include - skip_metrics : list or None, default : None + skip_metrics : list or None, default: None If given, a list of quality metrics to skip - unit_colors : dict or None, default : None + unit_colors : dict or None, default: None If given, a dictionary with unit ids as keys and colors as values - hide_unit_selector : bool, default : False + hide_unit_selector : bool, default: False For sortingview backend, if True the unit selector is not displayed """ diff --git a/src/spikeinterface/widgets/sorting_summary.py b/src/spikeinterface/widgets/sorting_summary.py index 00a05e445b..24b4ca8022 100644 --- a/src/spikeinterface/widgets/sorting_summary.py +++ b/src/spikeinterface/widgets/sorting_summary.py @@ -24,25 +24,25 @@ class SortingSummaryWidget(BaseWidget): ---------- sorting_analyzer : SortingAnalyzer The SortingAnalyzer object - unit_ids : list or None, default : None + unit_ids : list or None, default: None List of unit ids - sparsity : ChannelSparsity or None, default : None + sparsity : ChannelSparsity or None, default: None Optional ChannelSparsity to apply If SortingAnalyzer is already sparse, the argument is ignored - max_amplitudes_per_unit : int or None, default : None + max_amplitudes_per_unit : int or None, default: None Maximum number of spikes per unit for plotting amplitudes. If None, all spikes are plotted - min_similarity_for_correlograms : float, default : 0.2 + min_similarity_for_correlograms : float, default: 0.2 Threshold for computing pair-wise cross-correlograms. 
If template similarity between two units is below this threshold, the
cross-correlogram is not computed
(sortingview backend)
- curation : bool, default : False
+ curation : bool, default: False
If True, manual curation is enabled
(sortingview backend)
- label_choices : list or None, default : None
+ label_choices : list or None, default: None
List of labels to be added to the curation table
(sortingview backend)
- unit_table_properties : list or None, default : None
+ unit_table_properties : list or None, default: None
List of properties to be added to the unit table
(sortingview backend)
"""
diff --git a/src/spikeinterface/widgets/spike_locations.py b/src/spikeinterface/widgets/spike_locations.py
index 7f7b0190c8..94c9def630 100644
--- a/src/spikeinterface/widgets/spike_locations.py
+++ b/src/spikeinterface/widgets/spike_locations.py
@@ -15,24 +15,24 @@ class SpikeLocationsWidget(BaseWidget):
----------
sorting_analyzer : SortingAnalyzer
The object to get spike locations from
- unit_ids : list or None, default : None
+ unit_ids : list or None, default: None
List of unit ids
- segment_index : int or None, default : None
+ segment_index : int or None, default: None
The segment index (or None if mono-segment)
- max_spikes_per_unit : int or None, default : 500
+ max_spikes_per_unit : int or None, default: 500
Number of max spikes per unit to display. Use None for all spikes.
- with_channel_ids : bool, default : False
+ with_channel_ids : bool, default: False
Add channel ids text on the probe
- unit_colors : dict or None, default : None
+ unit_colors : dict or None, default: None
If given, a dictionary with unit ids as keys and colors as values
- hide_unit_selector : bool, default : False
+ hide_unit_selector : bool, default: False
For sortingview backend, if True the unit selector is not displayed
- plot_all_units : bool, default : True
+ plot_all_units : bool, default: True
If True, all units are plotted. The unselected ones (not in unit_ids),
are plotted in grey (matplotlib backend)
- plot_legend : bool, default : False
+ plot_legend : bool, default: False
If True, the legend is plotted (matplotlib backend)
- hide_axis : bool, default : False
+ hide_axis : bool, default: False
If True, the axis is set to off (matplotlib backend)
"""
diff --git a/src/spikeinterface/widgets/spikes_on_traces.py b/src/spikeinterface/widgets/spikes_on_traces.py
index 0e257beeda..a8eb022847 100644
--- a/src/spikeinterface/widgets/spikes_on_traces.py
+++ b/src/spikeinterface/widgets/spikes_on_traces.py
@@ -21,44 +21,44 @@ class SpikesOnTracesWidget(BaseWidget):
----------
sorting_analyzer : SortingAnalyzer
The SortingAnalyzer
- channel_ids : list or None, default : None
+ channel_ids : list or None, default: None
The channel ids to display
- unit_ids : list or None, default : None
+ unit_ids : list or None, default: None
List of unit ids
- order_channel_by_depth : bool, default : False
+ order_channel_by_depth : bool, default: False
If True, orders channels by depth
- time_range : list or None, default : None
+ time_range : list or None, default: None
List with start time and end time in seconds
- sparsity : ChannelSparsity or None, default : None
+ sparsity : ChannelSparsity or None, default: None
Optional ChannelSparsity to apply
If SortingAnalyzer is already sparse, the argument is ignored
- unit_colors : dict or None, default : None
+ unit_colors : dict or None, default: None
If given, a dictionary with unit ids as keys and colors as values
If None, then the get_unit_colors() is internally used.
(matplotlib backend)
- mode : "line" | "map" | "auto", default : "auto"
+ mode : "line" | "map" | "auto", default: "auto"
* "line": classical for low channel count
* "map": for high channel count use color heat map
* "auto": auto switch depending on the channel count ("line" if less than 64 channels, "map" otherwise)
- return_scaled : bool, default : False
+ return_scaled : bool, default: False
If True and the recording has scaled traces, it plots the scaled traces
- cmap : str, default : "RdBu"
+ cmap : str, default: "RdBu"
matplotlib colormap used in mode "map"
- show_channel_ids : bool, default : False
+ show_channel_ids : bool, default: False
Set yticks with channel ids
- color_groups : bool, default : False
+ color_groups : bool, default: False
If True groups are plotted with different colors
- color : str or None, default : None
+ color : str or None, default: None
The color used to draw the traces
- clim : None, tuple or dict, default : None
+ clim : None, tuple or dict, default: None
When mode is "map", this argument controls color limits.
If dict, keys should be the same as recording keys
- scale : float, default : 1
+ scale : float, default: 1
Scale factor for the traces
- with_colorbar : bool, default : True
+ with_colorbar : bool, default: True
When mode is "map", a colorbar is added
- tile_size : int, default : 512
+ tile_size : int, default: 512
For sortingview backend, the size of each tile in the rendered image
- seconds_per_row : float, default : 0.2
+ seconds_per_row : float, default: 0.2
For "map" mode and sortingview backend, seconds to render in each row
"""
diff --git a/src/spikeinterface/widgets/template_metrics.py b/src/spikeinterface/widgets/template_metrics.py
index ae6c233429..b80c863e75 100644
--- a/src/spikeinterface/widgets/template_metrics.py
+++ b/src/spikeinterface/widgets/template_metrics.py
@@ -12,15 +12,15 @@ class TemplateMetricsWidget(MetricsBaseWidget):
----------
sorting_analyzer : SortingAnalyzer
The object to get template metrics from
- unit_ids : list or None, default : None
+ unit_ids : list or None, default: None
List of unit ids
- include_metrics : list or None, default : None
+ include_metrics : list or None, default: None
If given, a list of template metrics to include
- skip_metrics : list or None or None, default : None
+ skip_metrics : list or None, default: None
If given, a list of template metrics to skip
- unit_colors : dict or None, default : None
+ unit_colors : dict or None, default: None
If given, a dictionary with unit ids as keys and colors as values
- hide_unit_selector : bool, default : False
+ hide_unit_selector : bool, default: False
For sortingview backend, if True the unit selector is not displayed
"""
diff --git a/src/spikeinterface/widgets/template_similarity.py b/src/spikeinterface/widgets/template_similarity.py
index 80f1e82740..b469d9901f 100644
--- a/src/spikeinterface/widgets/template_similarity.py
+++ b/src/spikeinterface/widgets/template_similarity.py
@@ -14,16 +14,16 @@ class TemplateSimilarityWidget(BaseWidget):
----------
sorting_analyzer : SortingAnalyzer
The object to get template similarity from
- unit_ids : list or None, default : None
- List of unit ids default : None
- display_diagonal_values : bool, default : False
+ unit_ids : list or None, default: None
+ List of unit ids
+ display_diagonal_values : bool, default: False
If False, the diagonal is displayed as zeros.
If True, the similarity values (all 1s) are displayed
- cmap : matplotlib colormap, default : "viridis"
+ cmap : matplotlib colormap, default: "viridis"
The matplotlib colormap
- show_unit_ticks : bool, default : False
+ show_unit_ticks : bool, default: False
If True, ticks display unit ids
- show_colorbar : bool, default : True
+ show_colorbar : bool, default: True
If True, color bar is displayed
"""
diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py
index 468edb470b..86f2350a85 100644
--- a/src/spikeinterface/widgets/traces.py
+++ b/src/spikeinterface/widgets/traces.py
@@ -18,47 +18,47 @@ class TracesWidget(BaseWidget):
recording : RecordingExtractor, dict, or list
The recording extractor object. If dict (or list) then it is a multi-layer display to compare,
for example, different processing steps
- segment_index : None or int, default : None
+ segment_index : None or int, default: None
The segment index (required for multi-segment recordings)
- channel_ids : list or None, default : None
+ channel_ids : list or None, default: None
The channel ids to display
- order_channel_by_depth : bool, default : False
+ order_channel_by_depth : bool, default: False
Reorder channels by depth
- time_range : list, tuple or None, default : None
+ time_range : list, tuple or None, default: None
List with start time and end time
- mode : "line" | "map" | "auto", default : "auto"
+ mode : "line" | "map" | "auto", default: "auto"
Three possible modes
* "line": classical for low channel count
* "map": for high channel count use color heat map
* "auto": auto switch depending on the channel count ("line" if less than 64 channels, "map" otherwise)
- return_scaled : bool, default : False
+ return_scaled : bool, default: False
If True and the recording has scaled traces, it plots the scaled traces
- events : np.array | list[np.narray] or None, default : None
+ events : np.array | list[np.ndarray] or None, default: None
Events to display as vertical lines.
The numpy arrays can either be of dtype float, with event times in seconds,
or a structured array with the "time" field,
and optional "duration" and "label" fields.
For multi-segment recordings, provide a list of numpy array events, one for each segment.
- cmap : matplotlib colormap, default : "RdBu_r"
+ cmap : matplotlib colormap, default: "RdBu_r"
matplotlib colormap used in mode "map"
- show_channel_ids : bool, default : False
+ show_channel_ids : bool, default: False
Set yticks with channel ids
- color_groups : bool, default : False
+ color_groups : bool, default: False
If True groups are plotted with different colors
- color : str or None, default : None
+ color : str or None, default: None
The color used to draw the traces
- clim : None, tuple or dict, default : None
+ clim : None, tuple or dict, default: None
When mode is "map", this argument controls color limits.
If dict, keys should be the same as recording keys
- scale : float, default : 1
+ scale : float, default: 1
Scale factor for the traces
- with_colorbar : bool, default : True
+ with_colorbar : bool, default: True
When mode is "map", a colorbar is added
- tile_size : int, default : 1500
+ tile_size : int, default: 1500
For sortingview backend, the size of each tile in the rendered image
- seconds_per_row : float, default : 0.2
+ seconds_per_row : float, default: 0.2
For "map" mode and sortingview backend, seconds to render in each row
- add_legend : bool, default : True
+ add_legend : bool, default: True
If True adds legend to figures
"""
diff --git a/src/spikeinterface/widgets/unit_depths.py b/src/spikeinterface/widgets/unit_depths.py
index 5627f766ee..18d173fc36 100644
--- a/src/spikeinterface/widgets/unit_depths.py
+++ b/src/spikeinterface/widgets/unit_depths.py
@@ -18,11 +18,11 @@ class UnitDepthsWidget(BaseWidget):
----------
sorting_analyzer : SortingAnalyzer
The SortingAnalyzer object
- unit_colors : dict or None, default : None
+ unit_colors : dict or None, default: None
If given, a dictionary with unit ids as keys and colors as values
- depth_axis : int, default : 1
+ depth_axis : int, default: 1
The dimension of unit_locations that is depth
- peak_sign : "neg" | "pos" | "both", default : "neg"
+ peak_sign : "neg" | "pos" | "both", default: "neg"
Sign of peak for amplitudes
"""
diff --git a/src/spikeinterface/widgets/unit_locations.py b/src/spikeinterface/widgets/unit_locations.py
index 2f5742eca6..3329c2183c 100644
--- a/src/spikeinterface/widgets/unit_locations.py
+++ b/src/spikeinterface/widgets/unit_locations.py
@@ -18,20 +18,20 @@ class UnitLocationsWidget(BaseWidget):
----------
sorting_analyzer : SortingAnalyzer
The SortingAnalyzer that must contain the "unit_locations" extension
- unit_ids : list or None, default : None
+ unit_ids : list or None, default: None
List of unit ids
- with_channel_ids : bool, default : False
+ with_channel_ids : bool, default: False
Add channel ids text on the probe
- unit_colors : dict or None, default : None
+ unit_colors : dict or None, default: None
If given, a dictionary with unit ids as keys and colors as values
- hide_unit_selector : bool, default : False
+ hide_unit_selector : bool, default: False
If True, the unit selector is not displayed (sortingview backend)
- plot_all_units : bool, default : True
+ plot_all_units : bool, default: True
If True, all units are plotted. The unselected ones (not in unit_ids),
are plotted in grey (matplotlib backend)
- plot_legend : bool, default : False
+ plot_legend : bool, default: False
If True, the legend is plotted (matplotlib backend)
- hide_axis : bool, default : False
+ hide_axis : bool, default: False
If True, the axis is set to off (matplotlib backend)
"""
diff --git a/src/spikeinterface/widgets/unit_presence.py b/src/spikeinterface/widgets/unit_presence.py
index 5a9d43af11..cdb6bf8264 100644
--- a/src/spikeinterface/widgets/unit_presence.py
+++ b/src/spikeinterface/widgets/unit_presence.py
@@ -15,11 +15,11 @@ class UnitPresenceWidget(BaseWidget):
The sorting extractor object
segment_index : None or int
The segment index.
- time_range : list or None, default : None
+ time_range : list or None, default: None
List with start time and end time
- bin_duration_s : float, default : 0.5
+ bin_duration_s : float, default: 0.5
Bin size (in seconds) for the heat map time axis
- smooth_sigma : float, default : 4.5
+ smooth_sigma : float, default: 4.5
Sigma for the Gaussian kernel (in number of bins)
"""
diff --git a/src/spikeinterface/widgets/unit_probe_map.py b/src/spikeinterface/widgets/unit_probe_map.py
index 3df9354840..bba4bd774e 100644
--- a/src/spikeinterface/widgets/unit_probe_map.py
+++ b/src/spikeinterface/widgets/unit_probe_map.py
@@ -25,9 +25,9 @@ class UnitProbeMapWidget(BaseWidget):
List of unit ids.
channel_ids : list
The channel ids to display
- animated : bool, default : False
+ animated : bool, default: False
If True, the amplitude is animated over time
- with_channel_ids : bool, default : False
+ with_channel_ids : bool, default: False
Add channel ids text on the probe
"""
diff --git a/src/spikeinterface/widgets/unit_summary.py b/src/spikeinterface/widgets/unit_summary.py
index d0457e52fa..0b2a348edf 100644
--- a/src/spikeinterface/widgets/unit_summary.py
+++ b/src/spikeinterface/widgets/unit_summary.py
@@ -25,9 +25,9 @@ class UnitSummaryWidget(BaseWidget):
The SortingAnalyzer object
unit_id : int or str
The unit id to plot the summary of
- unit_colors : dict or None, default : None
+ unit_colors : dict or None, default: None
If given, a dictionary with unit ids as keys and colors as values,
- sparsity : ChannelSparsity or None, default : None
+ sparsity : ChannelSparsity or None, default: None
Optional ChannelSparsity to apply.
If SortingAnalyzer is already sparse, the argument is ignored
"""
diff --git a/src/spikeinterface/widgets/unit_waveforms.py b/src/spikeinterface/widgets/unit_waveforms.py
index 4b41b51398..add8c820b8 100644
--- a/src/spikeinterface/widgets/unit_waveforms.py
+++ b/src/spikeinterface/widgets/unit_waveforms.py
@@ -19,45 +19,45 @@ class UnitWaveformsWidget(BaseWidget):
sorting_analyzer_or_templates : SortingAnalyzer | Templates
The SortingAnalyzer or Templates object.
If Templates is given, the "plot_waveforms" argument is set to False
- channel_ids : list or None, default : None
+ channel_ids : list or None, default: None
The channel ids to display
- unit_ids : list or None, default : None
+ unit_ids : list or None, default: None
List of unit ids
- plot_templates : bool, default : True
+ plot_templates : bool, default: True
If True, templates are plotted over the waveforms
- sparsity : ChannelSparsity or None, default : None
+ sparsity : ChannelSparsity or None, default: None
Optional ChannelSparsity to apply
If SortingAnalyzer is already sparse, the argument is ignored
- set_title : bool, default : True
+ set_title : bool, default: True
Create a plot title with the unit number if True
- plot_channels : bool, default : False
+ plot_channels : bool, default: False
Plot channel locations below traces
- unit_selected_waveforms : None or dict, default : None
+ unit_selected_waveforms : None or dict, default: None
A dict key is unit_id and value is the subset of waveforms indices that should
be displayed (matplotlib backend)
- max_spikes_per_unit : int or None, default : 50
+ max_spikes_per_unit : int or None, default: 50
If given and unit_selected_waveforms is None, only max_spikes_per_unit random
waveforms are displayed per unit, (matplotlib backend)
- scale : float, default : 1
+ scale : float, default: 1
Scale factor for the waveforms/templates (matplotlib backend)
- widen_narrow_scale : float, default : 1
+ widen_narrow_scale : float, default: 1
Scale factor for the x-axis of the waveforms/templates (matplotlib backend)
- axis_equal : bool, default : False
+ axis_equal : bool, default: False
Equal aspect ratio for x and y axis, to visualize the array geometry to scale
- lw_waveforms : float, default : 1
+ lw_waveforms : float, default: 1
Line width for the waveforms, (matplotlib backend)
- lw_templates : float, default : 2
+ lw_templates : float, default: 2
Line width for the templates, (matplotlib backend)
- unit_colors : None or dict, default : None
+ unit_colors : None or dict, default: None
A dict key is unit_id and value is any color format handled by matplotlib.
If None, then the get_unit_colors() is internally used. (matplotlib / ipywidgets backend)
- alpha_waveforms : float, default : 0.5
+ alpha_waveforms : float, default: 0.5
Alpha value for waveforms (matplotlib backend)
- alpha_templates : float, default : 1
+ alpha_templates : float, default: 1
Alpha value for templates, (matplotlib backend)
- shade_templates : bool, default : True
+ shade_templates : bool, default: True
If True, templates are shaded, see templates_percentile_shading argument
- templates_percentile_shading : float, tuple/list of floats, or None, default : (1, 25, 75, 99)
+ templates_percentile_shading : float, tuple/list of floats, or None, default: (1, 25, 75, 99)
It controls the shading of the templates.
If None, the shading is +/- the standard deviation of the templates.
If float, it controls the percentile of the template values used to shade the templates.
@@ -67,16 +67,16 @@ class UnitWaveformsWidget(BaseWidget):
are used for the lower bounds, and the second half for the upper bounds.
Inner elements produce darker shadings. For sortingview backend only 2 or 4 elements are supported.
- scalebar : bool, default : False
+ scalebar : bool, default: False
Display a scale bar on the waveforms plot (matplotlib backend)
- hide_unit_selector : bool, default : False
+ hide_unit_selector : bool, default: False
For sortingview backend, if True the unit selector is not displayed
- same_axis : bool, default : False
+ same_axis : bool, default: False
If True, waveforms and templates are displayed on the same axis (matplotlib backend)
- x_offset_units : bool, default : False
+ x_offset_units : bool, default: False
In case same_axis is True, this parameter allows x-offsetting the waveforms for different units
(recommended for a few units) (matplotlib backend)
- plot_legend : bool, default : True
+ plot_legend : bool, default: True
Display legend (matplotlib backend)
"""
diff --git a/src/spikeinterface/widgets/unit_waveforms_density_map.py b/src/spikeinterface/widgets/unit_waveforms_density_map.py
index 9ff10331c7..6ef1a7a782 100644
--- a/src/spikeinterface/widgets/unit_waveforms_density_map.py
+++ b/src/spikeinterface/widgets/unit_waveforms_density_map.py
@@ -16,21 +16,21 @@ class UnitWaveformDensityMapWidget(BaseWidget):
----------
sorting_analyzer : SortingAnalyzer
The SortingAnalyzer for calculating waveforms
- channel_ids : list or None, default : None
+ channel_ids : list or None, default: None
The channel ids to display
- unit_ids : list or None, default : None
+ unit_ids : list or None, default: None
List of unit ids
- sparsity : ChannelSparsity or None, default : None
+ sparsity : ChannelSparsity or None, default: None
Optional ChannelSparsity to apply
If SortingAnalyzer is already sparse, the argument is ignored
- use_max_channel : bool, default : False
+ use_max_channel : bool, default: False
Use only the max channel
- peak_sign : "neg" | "pos" | "both", default : "neg"
+ peak_sign : "neg" | "pos" | "both", default: "neg"
Used to detect max channel only when use_max_channel=True
- unit_colors : None or dict, default : None
+ unit_colors : None or dict, default: None
A dict key is unit_id and value is any color format handled by matplotlib.
If None, then the get_unit_colors() is internally used
- same_axis : bool, default : False
+ same_axis : bool, default: False
If True then all densities are plotted on the same axis and the channels shown are the union of all channels across units
"""

From a2fa1fd63d7ce780589b65bfb5fd5504cf07d3ad Mon Sep 17 00:00:00 2001
From: chrishalcrow <57948917+chrishalcrow@users.noreply.github.com>
Date: Mon, 3 Jun 2024 09:37:52 +0100
Subject: [PATCH 3/5] Remove spaces from Performance comments

---
 .../comparison/paircomparisons.py | 22 +++++++++----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/src/spikeinterface/comparison/paircomparisons.py b/src/spikeinterface/comparison/paircomparisons.py
index 865451705c..ea4b72b200 100644
--- a/src/spikeinterface/comparison/paircomparisons.py
+++ b/src/spikeinterface/comparison/paircomparisons.py
@@ -664,24 +664,24 @@ def count_units_categories(
_template_txt_performance = """PERFORMANCE ({method})
-----------
-ACCURACY : {accuracy}
-RECALL : {recall}
-PRECISION : {precision}
-FALSE DISCOVERY RATE : {false_discovery_rate}
-MISS RATE : {miss_rate}
+ACCURACY: {accuracy}
+RECALL: {recall}
+PRECISION: {precision}
+FALSE DISCOVERY RATE: {false_discovery_rate}
+MISS RATE: {miss_rate}
"""

_template_summary_part1 = """SUMMARY
-------
-GT num_units : {num_gt}
-TESTED num_units : {num_tested}
-num_well_detected : {num_well_detected}
-num_redundant : {num_redundant}
-num_overmerged : {num_overmerged}
+GT num_units: {num_gt}
+TESTED num_units: {num_tested}
+num_well_detected: {num_well_detected}
+num_redundant: {num_redundant}
+num_overmerged: {num_overmerged}
"""

_template_summary_part2 = """num_false_positive_units {num_false_positive_units}
-num_bad : {num_bad}
+num_bad: {num_bad}
"""

From a7e767871c80a717b329ab85c9dae54c37b497b5 Mon Sep 17 00:00:00 2001
From: chrishalcrow <57948917+chrishalcrow@users.noreply.github.com>
Date: Mon, 3 Jun 2024 10:00:33 +0100
Subject: [PATCH 4/5] Add new style to documentation

---
 doc/development/development.rst | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/doc/development/development.rst b/doc/development/development.rst
index 74ecfcb734..683a6807e9 100644
--- a/doc/development/development.rst
+++ b/doc/development/development.rst
@@ -165,16 +165,16 @@ This allows users to quickly understand the type of data that should be input in
Parameters
----------
- param_a: dict
+ param_a : dict
A dictionary containing the data
- param_b: int, default: 5
+ param_b : int, default: 5
A scaling factor to be applied to the data
- param_c: "mean" | "median", default: "mean"
+ param_c : "mean" | "median", default: "mean"
What to calculate on the data

Returns
-------
- great_data: dict
+ great_data : dict
A dictionary of the processed data
"""

From 5746bdec26c8d0b29213f7844f5b7ccbdd1de020 Mon Sep 17 00:00:00 2001
From: chrishalcrow <57948917+chrishalcrow@users.noreply.github.com>
Date: Tue, 4 Jun 2024 08:46:08 +0100
Subject: [PATCH 5/5] update development docs

---
 doc/development/development.rst | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/doc/development/development.rst b/doc/development/development.rst
index 683a6807e9..1094b466fc 100644
--- a/doc/development/development.rst
+++ b/doc/development/development.rst
@@ -152,7 +152,7 @@ for providing parameters, however is a little different. The project prefers the

.. code-block:: bash
- parameter_name: type, default: default_value
+ parameter_name : type, default: default_value

This allows users to quickly understand the type of data that should be input into a function
as well as whether a default is supplied. A full example would be:
@@ -179,7 +179,8 @@ This allows users to quickly understand the type of data that should be input in
"""
-Note that in this example we demonstrate two other docstring conventions followed by SpikeInterface. First, that all string arguments should be presented
+There should be a space between each parameter and the colon following it. This is necessary for using the `numpydoc validator `_.
+In the above example we demonstrate two other docstring conventions followed by SpikeInterface. First, that all string arguments should be presented
with double quotes. This is the same stylistic convention followed by Black and enforced by the pre-commit for the repo. Second, when a parameter
is a string with a limited number of values (e.g. :code:`mean` and :code:`median`), rather than give the type a value of :code:`str`, please list
the possible strings so that the user knows what the options are.
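To see the convention and the validator in action, here is a minimal sketch — illustrative only, assuming numpydoc is installed and that its `validate` helper accepts a full import path (the target below is just an example, not a prescribed one):

.. code-block:: python

    # Sketch: programmatically checking a docstring against the numpydoc
    # rules referenced above. validate() returns a dict whose "errors" entry
    # lists (code, message) pairs for each violated rule.
    from numpydoc.validate import validate

    report = validate("spikeinterface.preprocessing.whiten.compute_whitening_matrix")
    for error_code, message in report["errors"]:
        print(error_code, message)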