diff --git a/doc/api.rst b/doc/api.rst index 97c956c2f6..ab81b1596a 100644 --- a/doc/api.rst +++ b/doc/api.rst @@ -239,7 +239,6 @@ spikeinterface.comparison .. autofunction:: compare_sorter_to_ground_truth .. autofunction:: compare_templates .. autofunction:: compare_multiple_templates - .. autofunction:: aggregate_performances_table .. autofunction:: create_hybrid_units_recording .. autofunction:: create_hybrid_spikes_recording @@ -272,12 +271,22 @@ spikeinterface.widgets .. autofunction:: set_default_plotter_backend .. autofunction:: get_default_plotter_backend + .. autofunction:: plot_agreement_matrix .. autofunction:: plot_all_amplitudes_distributions .. autofunction:: plot_amplitudes .. autofunction:: plot_autocorrelograms + .. autofunction:: plot_confusion_matrix + .. autofunction:: plot_comparison_collision_by_similarity .. autofunction:: plot_crosscorrelograms + .. autofunction:: plot_isi_distribution .. autofunction:: plot_motion + .. autofunction:: plot_multicomparison_agreement + .. autofunction:: plot_multicomparison_agreement_by_sorter + .. autofunction:: plot_multicomparison_graph + .. autofunction:: plot_peak_activity + .. autofunction:: plot_probe_map .. autofunction:: plot_quality_metrics + .. autofunction:: plot_rasters .. autofunction:: plot_sorting_summary .. autofunction:: plot_spike_locations .. autofunction:: plot_spikes_on_traces @@ -286,34 +295,18 @@ spikeinterface.widgets .. autofunction:: plot_traces .. autofunction:: plot_unit_depths .. autofunction:: plot_unit_locations + .. autofunction:: plot_unit_presence + .. autofunction:: plot_unit_probe_map .. autofunction:: plot_unit_summary .. autofunction:: plot_unit_templates .. autofunction:: plot_unit_waveforms_density_map .. autofunction:: plot_unit_waveforms - - -Legacy widgets -~~~~~~~~~~~~~~ - -These widgets are only available with the "matplotlib" backend - -.. automodule:: spikeinterface.widgets - :noindex: - - .. autofunction:: plot_rasters - .. autofunction:: plot_probe_map - .. autofunction:: plot_isi_distribution - .. autofunction:: plot_peak_activity_map - .. autofunction:: plot_principal_component - .. autofunction:: plot_unit_probe_map - .. autofunction:: plot_confusion_matrix - .. autofunction:: plot_agreement_matrix - .. autofunction:: plot_multicomp_graph - .. autofunction:: plot_multicomp_agreement - .. autofunction:: plot_multicomp_agreement_by_sorter - .. autofunction:: plot_comparison_collision_pair_by_pair - .. autofunction:: plot_comparison_collision_by_similarity - .. autofunction:: plot_sorting_performance + .. autofunction:: plot_study_run_times + .. autofunction:: plot_study_unit_counts + .. autofunction:: plot_study_performances + .. autofunction:: plot_study_agreement_matrix + .. autofunction:: plot_study_summary + .. autofunction:: plot_study_comparison_collision_by_similarity spikeinterface.exporters diff --git a/doc/install_sorters.rst b/doc/install_sorters.rst index 10a3185c5c..e805f03eed 100644 --- a/doc/install_sorters.rst +++ b/doc/install_sorters.rst @@ -32,8 +32,8 @@ Some novel spike sorting algorithms are implemented directly in SpikeInterface u :py:mod:`spikeinterface.sortingcomponents` module. Checkout the :ref:`si_based` section of this page for more information! -If you experience installation problems please directly contact the authors of theses tools or write on the -related mailing list, google group, etc. 
+If you experience installation problems please directly contact the authors of these tools or write on the
+related mailing list, google group, GitHub issue page, etc.
 
 Please feel free to enhance this document with more installation tips.
 
@@ -251,31 +251,6 @@ Combinato
 
        # or using CombinatoSorter.set_combinato_path()
 
-
-Klusta (LEGACY)
-^^^^^^^^^^^^^^^
-
-* Python
-* Requires SpikeInterface<0.96.0 (and Python 3.7)
-* Url: https://github.com/kwikteam/klusta
-* Authors: Cyrille Rossant, Shabnam Kadir, Dan Goodman, Max Hunter, Kenneth Harris
-* Installation::
-
-    pip install Cython h5py tqdm
-    pip install click klusta klustakwik2
-
-* See also: https://github.com/kwikteam/phy
-
-
-Yass (LEGACY)
-^^^^^^^^^^^^^
-
-* Python, CUDA, torch
-* Requires SpikeInterface<0.96.0 (and Python 3.7)
-* Url: https://github.com/paninski-lab/yass
-* Authors: JinHyung Lee, Catalin Mitelut, Liam Paninski
-* Installation::
-
-    https://github.com/paninski-lab/yass/wiki/Installation-Local
 
 .. _si_based:
 
@@ -302,3 +277,50 @@ working not only at peak times but at all times, recovering more spikes close to
     pip install hdbscan
     pip install spikeinterface
     pip install numba (or conda install numba as recommended by conda authors)
+
+
+Tridesclous2
+^^^^^^^^^^^^
+
+This is an upgraded version of Tridesclous, natively written in SpikeInterface.
+
+* Python
+* Requires: HDBSCAN and Numba
+* Authors: Samuel Garcia
+* Installation::
+
+    pip install hdbscan
+    pip install spikeinterface
+    pip install numba
+
+
+
+Legacy Sorters
+--------------
+
+Klusta (LEGACY)
+^^^^^^^^^^^^^^^
+
+* Python
+* Requires SpikeInterface<0.96.0 (and Python 3.7)
+* Url: https://github.com/kwikteam/klusta
+* Authors: Cyrille Rossant, Shabnam Kadir, Dan Goodman, Max Hunter, Kenneth Harris
+* Installation::
+
+    pip install Cython h5py tqdm
+    pip install click klusta klustakwik2
+
+* See also: https://github.com/kwikteam/phy
+
+
+Yass (LEGACY)
+^^^^^^^^^^^^^
+
+* Python, CUDA, torch
+* Requires SpikeInterface<0.96.0 (and Python 3.7)
+* Url: https://github.com/paninski-lab/yass
+* Authors: JinHyung Lee, Catalin Mitelut, Liam Paninski
+* Installation::
+
+    https://github.com/paninski-lab/yass/wiki/Installation-Local
diff --git a/doc/modules/core.rst b/doc/modules/core.rst
index 4c03950b1d..656176f27a 100644
--- a/doc/modules/core.rst
+++ b/doc/modules/core.rst
@@ -56,7 +56,7 @@ We recommend this approach to advanced users, since it requires a deeper knowled
 Recording
 ---------
 
-The :py:class:`~spikeinterface.core.BaseRecording` class serves as basis for all
+The :py:class:`~spikeinterface.core.BaseRecording` class serves as the basis for all
 :code:`Recording` classes.
It interfaces with the raw traces and has the following features: @@ -86,7 +86,7 @@ with 16 channels: # retrieve raw traces between frames 100 and 200 traces = recording.get_traces(start_frame=100, end_frame=200, segment_index=0) - # retrieve raw traces only for the first 4 of the channels + # retrieve raw traces only for the first 4 channels traces_slice = recording.get_traces(start_frame=100, end_frame=200, segment_index=0, channel_ids=channel_ids[:4]) # retrieve traces after scaling to uV @@ -119,7 +119,7 @@ with 16 channels: # 'recording_by_group' is a dict with group as keys (0,1,2,3) and channel # sliced recordings as values - # set times (for synchronization) - assume out times start at 300 seconds + # set times (for synchronization) - assume our times start at 300 seconds timestamps = np.arange(num_samples) / sampling_frequency + 300 recording.set_times(timestamps, segment_index=0) @@ -127,7 +127,7 @@ with 16 channels: Sorting ------- -The :py:class:`~spikeinterface.core.BaseSorting` class serves as basis for all :code:`Sorting` classes. +The :py:class:`~spikeinterface.core.BaseSorting` class serves as the basis for all :code:`Sorting` classes. It interfaces with a spike-sorted output and has the following features: * retrieve spike trains of different units @@ -143,14 +143,14 @@ with 10 units: .. code-block:: python - unit_ids = sorting.channel_ids + unit_ids = sorting.unit_ids num_channels = sorting.get_num_units() sampling_frequency = sorting.sampling_frequency # retrieve spike trains for a unit (returned as sample indices) unit0 = unit_ids[0] spike_train = sorting.get_unit_spike_train(unit_id=unit0, segment_index=0) - # retrieve spikes between 100 and 200 + # retrieve spikes between frames 100 and 200 spike_train_slice = sorting.get_unit_spike_train(unit_id=unit0, start_frame=100, end_frame=200, segment_index=0) @@ -167,13 +167,13 @@ with 10 units: sorting.annotate(date="Spike sorted today") sorting.get_annotation(key="date") - # get new sorting with the first 10s of spike trains + # get new sorting within the first 10s of the spike trains sorting_slice_frames = sorting.frame_slice(start_frame=0, end_frame=int(10*sampling_frequency)) - # get new sorting with the first 4 units + # get new sorting with only the first 4 units sorting_select_units = sorting.select_units(unit_ids=unit_ids[:4]) - # register 'recording' from previous and get spike trains in seconds + # register 'recording' from the previous example and get the spike trains in seconds sorting.register_recording(recording) spike_train_s = sorting.get_unit_spike_train(unit_id=unit0, segment_index=0, return_times=True) @@ -183,10 +183,10 @@ with 10 units: Internally, any sorting object can construct 2 internal caches: - 1. a list (per segment) of dict (per unit) of numpy.array. This cache is usefull when accessing spiketrains unit - per unit across segments. - 2. a unique numpy.array with structured dtype aka "spikes vector". This is usefull for processing by small chunk of - time, like extract amplitudes from a recording. + 1. a list (per segment) of dict (per unit) of numpy.array. This cache is useful when accessing spike trains on a unit + per unit basis across segments. + 2. a unique numpy.array with structured dtype aka "spikes vector". This is useful for processing by small chunks of + time, like for extracting amplitudes from a recording. 
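+
+A minimal sketch of accessing both caches (this assumes a recent API where the
+:code:`to_spike_vector()` method is available; check your SpikeInterface version):
+
+.. code-block:: python
+
+    # per-unit cache: one spike train (sample indices) per unit and segment
+    spike_train = sorting.get_unit_spike_train(unit_id=unit_ids[0], segment_index=0)
+
+    # "spikes vector": a single structured array over all units and segments,
+    # convenient when processing the recording in small time chunks
+    spikes = sorting.to_spike_vector()
+    print(spikes.dtype.names)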
 WaveformExtractor
 
@@ -194,12 +194,12 @@ WaveformExtractor
 
 The :py:class:`~spikeinterface.core.WaveformExtractor` class is the core object to combine a
 :py:class:`~spikeinterface.core.BaseRecording` and a :py:class:`~spikeinterface.core.BaseSorting` object.
-Waveforms are very important for additional analysis, and the basis of several postprocessing and quality metrics
+Waveforms are very important for additional analyses, and the basis of several postprocessing and quality metrics
 computations.
 
 The :py:class:`~spikeinterface.core.WaveformExtractor` allows us to:
 
-* extract and waveforms
+* extract waveforms
 * sub-sample spikes for waveform extraction
 * compute templates (i.e. average extracellular waveforms) with different modes
 * save waveforms in a folder (in numpy / `Zarr `_) for easy retrieval
@@ -215,16 +215,28 @@ Finally, an existing :py:class:`~spikeinterface.core.WaveformExtractor` can be s
 .. code-block:: python
 
     # extract dense waveforms on 500 spikes per unit
-    we = extract_waveforms(recording, sorting, folder="waveforms",
-                           max_spikes_per_unit=500)
+    we = extract_waveforms(recording=recording,
+                           sorting=sorting,
+                           sparse=False,
+                           folder="waveforms",
+                           max_spikes_per_unit=500,
+                           overwrite=True)
 
     # same, but with parallel processing! (1s chunks processed by 8 jobs)
     job_kwargs = dict(n_jobs=8, chunk_duration="1s")
-    we = extract_waveforms(recording, sorting, folder="waveforms_par",
-                           max_spikes_per_unit=500, overwrite=True,
+    we = extract_waveforms(recording=recording,
+                           sorting=sorting,
+                           sparse=False,
+                           folder="waveforms_parallel",
+                           max_spikes_per_unit=500,
+                           overwrite=True,
                            **job_kwargs)
 
     # same, but in-memory
-    we_mem = extract_waveforms(recording, sorting, folder=None,
-                               mode="memory", max_spikes_per_unit=500,
+    we_mem = extract_waveforms(recording=recording,
+                               sorting=sorting,
+                               sparse=False,
+                               folder=None,
+                               mode="memory",
+                               max_spikes_per_unit=500,
                                **job_kwargs)
 
     # load pre-computed waveforms
@@ -243,13 +255,16 @@ Finally, an existing :py:class:`~spikeinterface.core.WaveformExtractor` can be s
     template_stds = we.get_all_templates(mode="std")
 
     # save to Zarr
-    we_zarr = we.save(folder="waveforms.zarr", format="zarr")
+    we_zarr = we.save(folder="waveforms_zarr", format="zarr")
 
     # extract sparse waveforms (see Sparsity section)
-    # this will use 50 spike per unit to estimate the sparsity of 40um radius for each unit
-    we_sparse = extract_waveforms(recording, sorting, folder="waveforms_sparse",
-                                  max_spikes_per_unit=500, sparse=True,
-                                  method="radius", radius_um=40,
+    # this will use 50 spikes per unit to estimate the sparsity within a 40um radius from that unit
+    we_sparse = extract_waveforms(recording=recording,
+                                  sorting=sorting,
+                                  folder="waveforms_sparse",
+                                  max_spikes_per_unit=500,
+                                  method="radius",
+                                  radius_um=40,
                                   num_spikes_for_sparsity=50)
 
 
@@ -265,11 +280,14 @@ In order to make a waveform folder portable (e.g. copied to another location or
     # save the sorting object in the "processed" folder
     sorting = sorting.save(folder=processed_folder / "sorting")
     # extract waveforms using relative paths
-    we = extract_waveforms(recording, sorting, folder=processed_folder / "waveforms",
+    we = extract_waveforms(recording=recording,
+                           sorting=sorting,
+                           folder=processed_folder / "waveforms",
                            use_relative_path=True)
     # the "processed" folder is now portable, and the waveform extractor can be reloaded
     # from a different location/machine (without loading the recording)
-    we_loaded = si.load_waveforms(processed_folder / "waveforms", with_recording=False)
+    we_loaded = si.load_waveforms(folder=processed_folder / "waveforms",
+                                  with_recording=False)
 
 
 Event
 -----
 
@@ -278,7 +296,7 @@ Event
 The :py:class:`~spikeinterface.core.BaseEvent` class serves as basis for all :code:`Event` classes.
 It allows one to retrieve events and epochs (e.g. TTL pulses).
 Internally, events are represented as numpy arrays with a structured dtype. The structured dtype
-must contain the :code:`time` field, which represent the event times in seconds. Other fields are
+must contain the :code:`time` field, which represents the event times in seconds. Other fields are
 optional.
 
 Here we assume :code:`event` is a :py:class:`~spikeinterface.core.BaseEvent` object
@@ -313,7 +331,7 @@ threshold and only record the times at which a peak was detected and the wavefor
 the peak.
 
 **NOTE**: while we support this class (mainly for legacy formats), this approach is a bad practice
-and highly discouraged! Most modern spike sorters, in fact, require the raw traces to perform
+and is highly discouraged! Most modern spike sorters, in fact, require the raw traces to perform
 template matching to recover spikes!
 
 Here we assume :code:`snippets` is a :py:class:`~spikeinterface.core.BaseSnippets` object
@@ -374,9 +392,9 @@ The probe has 4 shanks, which can be loaded as separate groups (and spike sorted
     # set probe
     recording_w_probe = recording.set_probe(probe)
 
-    # set probe with group info
+    # set probe with group info and return a new recording object
     recording_w_probe = recording.set_probe(probe, group_mode="by_shank")
-    # set probe in place
+    # set probe in place, i.e., modify the current recording
     recording.set_probe(probe, group_mode="by_shank", in_place=True)
 
     # retrieve probe
@@ -401,11 +419,16 @@ probes, such as Neuropixels, because the waveforms of a unit will only appear on
 Sparsity is defined as the subset of channels on which waveforms (and related information) are defined. Of course,
 sparsity is not global, but it is unit-specific.
 
+**NOTE** As of version :code:`0.99.0` the default for :code:`extract_waveforms()` is :code:`sparse=True`, i.e. every :code:`waveform_extractor`
+will be sparse by default. Thus, users who wish to have dense waveforms must set :code:`sparse=False`. Keyword arguments
+can still be passed to :code:`extract_waveforms()` to generate the desired sparsity, as explained below.
+
 Sparsity can be computed from a :py:class:`~spikeinterface.core.WaveformExtractor` object with the
 :py:func:`~spikeinterface.core.compute_sparsity` function:
 
 ..
code-block:: python + # in this case 'we' should be a dense waveform_extractor sparsity = compute_sparsity(we, method="radius", radius_um=40) The returned :code:`sparsity` is a :py:class:`~spikeinterface.core.ChannelSparsity` object, which has convenient @@ -419,11 +442,11 @@ methods to access the sparsity information in several ways: There are several methods to compute sparsity, including: * | :code:`method="radius"`: selects the channels based on the channel locations. For example, using a - | :code:`radius_um=40`, will select, for each unit, the channels which are whithin 40um of the channel with the - | largest amplitude (*extremum channel*). **This is the recommended method for high-density probes** + | :code:`radius_um=40`, will select, for each unit, the channels which are within 40um of the channel with the + | largest amplitude (*the extremum channel*). **This is the recommended method for high-density probes** * | :code:`method="best_channels"`: selects the best :code:`num_channels` channels based on their amplitudes. Note that | in this case the selected channels might not be close to each other. -* | :code:`method="threshold"`: selects channels based on an SNR threshold (:code:`threshold` argument) +* | :code:`method="threshold"`: selects channels based on an SNR threshold (given by the :code:`threshold` argument) * | :code:`method="by_property"`: selects channels based on a property, such as :code:`group`. This method is recommended | when working with tetrodes. @@ -432,7 +455,7 @@ The computed sparsity can be used in several postprocessing and visualization fu .. code-block:: python - we_sparse = we.save(we, sparsity=sparsity, folder="waveforms_sparse") + we_sparse = we.save(waveform_extractor=we, sparsity=sparsity, folder="waveforms_sparse") The :code:`we_sparse` object will now have an associated sparsity (:code:`we.sparsity`), which is automatically taken into consideration for downstream analysis (with the :py:meth:`~spikeinterface.core.WaveformExtractor.is_sparse` @@ -460,10 +483,12 @@ and annotations associated to the object. The save function also supports parallel processing to speed up the writing process. From a SpikeInterface folder, the saved object can be reloaded with the :code:`load_extractor()` function. -This saving/loading features enables to store SpikeInterface objects efficiently and to distribute processing. +This saving/loading features enables us to store SpikeInterface objects efficiently and to distribute processing. .. code-block:: python + # n_jobs is related to the number of processors you want to use + # n_jobs=-1 indicates to use all available job_kwargs = dict(n_jobs=8, chunk_duration="1s") # save recording to folder in binary (default) format recording_bin = recording.save(folder="recording", **job_kwargs) @@ -475,7 +500,7 @@ This saving/loading features enables to store SpikeInterface objects efficiently sorting_saved = sorting.save(folder="sorting") **NOTE:** the Zarr format by default applies data compression with :code:`Blosc.Zstandard` codec with BIT shuffling. -Any other Zarr-compatible compressor and filters can be applied using the :code:`compressor` and :code:`filters` +Any other Zarr-compatible compressors and filters can be applied using the :code:`compressor` and :code:`filters` arguments. For example, in this case we apply `LZMA `_ and use a `Delta `_ filter: @@ -550,7 +575,7 @@ In order to do this, one can use the :code:`Numpy*` classes, :py:class:`~spikein but they are not bound to a file. 
Also note the class :py:class:`~spikeinterface.core.SharedMemorySorting` which is very similar to -Similar to :py:class:`~spikeinterface.core.NumpySorting` but with an unerlying SharedMemory which is usefull for +Similar to :py:class:`~spikeinterface.core.NumpySorting` but with an underlying SharedMemory which is useful for parallel computing. In this example, we create a recording and a sorting object from numpy objects: @@ -585,14 +610,14 @@ In this example, we create a recording and a sorting object from numpy objects: Any sorting object can be transformed into a :py:class:`~spikeinterface.core.NumpySorting` or -:py:class:`~spikeinterface.core.SharedMemorySorting` easily like this +:py:class:`~spikeinterface.core.SharedMemorySorting` easily like this: .. code-block:: python # turn any sortinto into NumpySorting - soring_np = sorting.to_numpy_sorting() + sorting_np = sorting.to_numpy_sorting() - # or to SharedMemorySorting for parrallel computing + # or to SharedMemorySorting for parallel computing sorting_shm = sorting.to_shared_memory_sorting() @@ -602,7 +627,7 @@ Manipulating objects: slicing, aggregating ------------------------------------------- :py:class:`~spikeinterface.core.BaseRecording` (and :py:class:`~spikeinterface.core.BaseSnippets`) -and :py:class:`~spikeinterface.core.BaseSorting` objects can be sliced in the time or channel/unit axis. +and :py:class:`~spikeinterface.core.BaseSorting` objects can be sliced on the time or channel/unit axis. This operations are completely lazy, as there is no data duplication. After slicing or aggregating, the new objects will be a *view* of the original ones. @@ -611,11 +636,11 @@ the new objects will be a *view* of the original ones. # here we load a very long recording and sorting recording = read_spikeglx('np_folder') - sorting =read_kilosrt('ks_folder') + sorting =read_kilosort('ks_folder') - # keep one channel every ten channels - keep_ids = rec.channel_ids[::10] - sub_recording = rec.channel_slice(channel_ids=keep_ids) + # keep one channel of every tenth channel + keep_ids = recording.channel_ids[::10] + sub_recording = recording.channel_slice(channel_ids=keep_ids) # keep between 5min and 12min fs = recording.sampling_frequency @@ -641,8 +666,8 @@ We can also aggregate (or stack) multiple sortings on the unit axis using the .. code-block:: python - sortingA = read_npz('sortingA.npz') - sortingB = read_npz('sortingB.npz') + sortingA = read_npz_sorting('sortingA.npz') + sortingB = read_npz_sorting('sortingB.npz') sorting_20_units = aggregate_units([sortingA, sortingB]) @@ -706,7 +731,7 @@ object: * :py:func:`~spikeinterface.core.get_chunk_with_margin`: gets traces with a left and right margin * :py:func:`~spikeinterface.core.get_closest_channels`: returns the :code:`num_channels` closest channels to each specified channel * :py:func:`~spikeinterface.core.get_channel_distances`: returns a square matrix with channel distances - * :py:func:`~spikeinterface.core.order_channels_by_depth`: gets channel order in depth: + * :py:func:`~spikeinterface.core.order_channels_by_depth`: gets channel order in depth Template tools diff --git a/doc/modules/curation.rst b/doc/modules/curation.rst index 23e9e20d96..032988818b 100644 --- a/doc/modules/curation.rst +++ b/doc/modules/curation.rst @@ -76,7 +76,7 @@ merges. Therefore, it has many parameters and options. 
     clean_sorting = MergeUnitsSorting(parent_sorting=sorting, units_to_merge=merges)
 
 
-Manual curation with sorting view
+Manual curation with sortingview
 ---------------------------------
 
 Within the :code:`sortingview` widgets backend (see :ref:`sorting_view`), the
@@ -108,8 +108,9 @@ The manual curation (including merges and labels) can be applied to a SpikeInter
 
     _ = compute_correlograms(waveform_extractor=we)
 
     # This loads the data to the cloud for web-based plotting and sharing
+    # curation=True required for allowing curation in the sortingview gui
     plot_sorting_summary(waveform_extractor=we, curation=True, backend='sortingview')
-    # we open the printed link URL in a browswe
+    # we open the printed link URL in a browser
     # - make manual merges and labeling
     # - from the curation box, click on "Save as snapshot (sha1://)"
 
diff --git a/doc/modules/exporters.rst b/doc/modules/exporters.rst
index 155050ddb0..b322139c2b 100644
--- a/doc/modules/exporters.rst
+++ b/doc/modules/exporters.rst
@@ -28,14 +28,14 @@ The input of the :py:func:`~spikeinterface.exporters.export_to_phy` is a :code:`
     from spikeinterface.exporters import export_to_phy
 
     # the waveforms are sparse so it is faster to export to phy
-    we = extract_waveforms(recording=recording, sorting=sorting, folder='waveforms', sparse=True)
+    we = extract_waveforms(recording=recording, sorting=sorting, folder='waveforms')
 
     # some computations are done before to control all options
-    compute_spike_amplitudes(waveform_extractor=we)
-    compute_principal_components(waveform_extractor=we, n_components=3, mode='by_channel_global')
+    _ = compute_spike_amplitudes(waveform_extractor=we)
+    _ = compute_principal_components(waveform_extractor=we, n_components=3, mode='by_channel_global')
 
     # the export process is fast because everything is pre-computed
-    export_to_phy(wavefor_extractor=we, output_folder='path/to/phy_folder')
+    export_to_phy(waveform_extractor=we, output_folder='path/to/phy_folder')
 
 
@@ -71,12 +71,12 @@ with many units!
 
     # the waveforms are sparse for more interpretable figures
-    we = extract_waveforms(recording=recording, sorting=sorting, folder='path/to/wf', sparse=True)
+    we = extract_waveforms(recording=recording, sorting=sorting, folder='path/to/wf')
 
     # some computations are done before to control all options
-    compute_spike_amplitudes(waveform_extractor=we)
-    compute_correlograms(waveform_extractor=we)
-    compute_quality_metrics(waveform_extractor=we, metric_names=['snr', 'isi_violation', 'presence_ratio'])
+    _ = compute_spike_amplitudes(waveform_extractor=we)
+    _ = compute_correlograms(waveform_extractor=we)
+    _ = compute_quality_metrics(waveform_extractor=we, metric_names=['snr', 'isi_violation', 'presence_ratio'])
 
     # the export process
     export_report(waveform_extractor=we, output_folder='path/to/spikeinterface-report-folder')
diff --git a/doc/modules/postprocessing.rst b/doc/modules/postprocessing.rst
index 112c6e367d..195413e2af 100644
--- a/doc/modules/postprocessing.rst
+++ b/doc/modules/postprocessing.rst
@@ -18,7 +18,7 @@ of a :code:`WaveformExtractor` will be saved along side the :code:`WaveformExtra
 This workflow is convenient for retrieval of time-consuming computations (such as pca or spike amplitudes) when
 reloading a :code:`WaveformExtractor`.
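+
+A minimal sketch of this retrieval workflow (the extension name :code:`"spike_amplitudes"`
+and the :code:`"waveforms"` folder are illustrative assumptions; adapt them to your own data):
+
+.. code-block:: python
+
+    from spikeinterface.core import load_waveforms
+    from spikeinterface.postprocessing import compute_spike_amplitudes
+
+    # compute once; the result is stored alongside the WaveformExtractor folder
+    _ = compute_spike_amplitudes(waveform_extractor=we)
+
+    # later, reload the WaveformExtractor and retrieve the cached extension data
+    we = load_waveforms(folder="waveforms")
+    amplitudes = we.load_extension("spike_amplitudes").get_data()
+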
-:py:class:`~spikeinterface.core.BaseWaveformExtractorExtension` objects are tightly connected to the +:py:class:`~spikeinterface.core.BaseWaveformExtractorExtension` objects are tightly connected to the parent :code:`WaveformExtractor` object, so that operations done on the :code:`WaveformExtractor`, such as saving, loading, or selecting units, will be automatically applied to all extensions. diff --git a/doc/modules/sorters.rst b/doc/modules/sorters.rst index 5040b01ec2..98a5ea4fcf 100644 --- a/doc/modules/sorters.rst +++ b/doc/modules/sorters.rst @@ -12,9 +12,9 @@ On the other hand SpikeInterface directly implements some internal sorters (**sp that do not depend on external tools, but depend on the :py:mod:`spikeinterface.sortingcomponents` module. **Note that internal sorters are currently experimental and under development**. -A drawback of using external sorters is the installation of these tools. Sometimes they need MATLAB, -specific versions of CUDA, specific gcc versions vary or even worse outdated versions of -Python/NumPy. In that case, SpikeInterface offer the mechanism of running external sorters inside a +A drawback of using external sorters is the separate installation of these tools. Sometimes they need MATLAB, +specific versions of CUDA, specific gcc versions or outdated versions of +Python/NumPy. In this case, SpikeInterface offers the mechanism of running external sorters inside a container (Docker/Singularity) with the sorter pre-installed. See :ref:`containerizedsorters`. @@ -244,7 +244,7 @@ There are three options: the current development version from the :code:`main` branch will be installed in the container. 3. **local copy**: if you installed :code:`spikeinterface` from source and you have some changes in your branch or fork - that are not in the :code:`main` branch, you can install a copy of your :code:`spikeinterface` packahe in the container. + that are not in the :code:`main` branch, you can install a copy of your :code:`spikeinterface` package in the container. To do so, you need to set en environment variable :code:`SPIKEINTERFACE_DEV_PATH` to the location where you cloned the :code:`spikeinterface` repo (e.g. on Linux: :code:`export SPIKEINTERFACE_DEV_PATH="path-to-spikeinterface-clone"`. @@ -397,7 +397,7 @@ to concatenate the recordings before spike sorting and how to split the sorted o on the concatenation. Note that some sorters (tridesclous, spykingcircus2) handle a multi-segments paradigm directly. In -that case we will use the :py:func:`~spikeinterface.core.append_recordings()` function. Many sorters +this case we will use the :py:func:`~spikeinterface.core.append_recordings()` function. Many sorters do not handle multi-segment, and in that case we will use the :py:func:`~spikeinterface.core.concatenate_recordings()` function. diff --git a/doc/modules/sortingcomponents.rst b/doc/modules/sortingcomponents.rst index 1e58972497..f33a0b3cf2 100644 --- a/doc/modules/sortingcomponents.rst +++ b/doc/modules/sortingcomponents.rst @@ -15,7 +15,7 @@ Another advantage of *modularization* is that we can accurately benchmark every For example, what is the performance of peak detection method 1 or 2, provided that the rest of the pipeline is the same? -For now, we have methods for: +Currently, we have methods for: * peak detection * peak localization * peak selection @@ -24,7 +24,7 @@ For now, we have methods for: * clustering * template matching -For some of theses steps, implementations are in a very early stage and are still a bit *drafty*. 
+For some of these steps, implementations are in a very early stage and are still a bit *drafty*. Signature and behavior may change from time to time in this alpha period development. You can also have a look `spikeinterface blog `_ where there are more detailed @@ -76,7 +76,7 @@ Different methods are available with the :code:`method` argument: **NOTE**: the torch implementations give slightly different results due to a different implementation. -Peak detection, as many sorting components, can be run in parallel. +Peak detection, as many of the other sorting components, can be run in parallel. Peak localization @@ -105,8 +105,8 @@ Currently, the following methods are implemented: * 'center_of_mass' * 'monopolar_triangulation' with optimizer='least_square' This method is from Julien Boussard and Erdem Varol from the Paninski lab. - This has been presented at [NeurIPS](https://nips.cc/Conferences/2021/ScheduleMultitrack?event=26709) - see also [here](https://openreview.net/forum?id=ohfi44BZPC4) + This has been presented at `NeurIPS `_ + see also `here `_ * 'monopolar_triangulation' with optimizer='minimize_with_log_penality' These methods are the same as implemented in :py:mod:`spikeinterface.postprocessing.unit_localization` @@ -133,7 +133,7 @@ Peak selection -------------- When too many peaks are detected a strategy can be used to select (or sub-sample) only some of them before clustering. -This is the strategy used by spyking-circus or tridesclous, for instance. +This is the strategy used by spyking-circus and tridesclous, for instance. Then, clustering is run on this subset of peaks, templates are extracted, and a template-matching step is run to find all spikes. @@ -219,7 +219,7 @@ Here is a short example that depends on the output of "Motion interpolation": from spikeinterface.sortingcomponents.motion_interpolation import InterpolateMotionRecording recording_corrected = InterpolateMotionRecording(recording=recording_with_drift, motion=motion, temporal_bins=temporal_bins, spatial_bins=spatial_bins - spatial_interpolation_method='kriging, + spatial_interpolation_method='kriging', border_mode='remove_channels') **Notes**: @@ -227,14 +227,14 @@ Here is a short example that depends on the output of "Motion interpolation": * :code:`border_mode` is a very important parameter. It controls dealing with the border because motion causes units on the border to not be present throughout the entire recording. We highly recommend the :code:`border_mode='remove_channels'` because this removes channels on the border that will be impacted by drift. Of course the larger the motion is - the more channels are removed. + the greater the number of channels that would be removed. Clustering ---------- The clustering step remains the central step of spike sorting. -Historically this step was separted into two distinct parts: feature reduction and clustering. +Historically this step was separated into two distinct parts: feature reduction and clustering. In SpikeInterface, we decided to regroup these two steps into the same module. This allows one to compute feature reduction 'on-the-fly' and avoid long computations and storage of large features. diff --git a/doc/modules/widgets.rst b/doc/modules/widgets.rst index f37b2a5a6f..4d69867d83 100644 --- a/doc/modules/widgets.rst +++ b/doc/modules/widgets.rst @@ -14,8 +14,9 @@ Since version 0.95.0, the :py:mod:`spikeinterface.widgets` module supports multi * | :code:`sortingview`: web-based and interactive rendering using the `sortingview `_ | and `FIGURL `_ packages. 
-Version 0.100.0, also come with this new backend: -* | :code:`ephyviewer`: interactive Qt based using the `ephyviewer `_ package +Version 0.99.0 also comes with this new backend: + +* :code:`ephyviewer`: interactive Qt based using the `ephyviewer `_ package Installing backends @@ -70,7 +71,7 @@ To install it, run: .. code-block:: bash - pip install sortingview figurl-jupyter + pip install sortingview Internally, the processed data to be rendered are uploaded to a public bucket in the cloud, so that they can be visualized via the web (if :code:`generate_url=True`). @@ -78,7 +79,7 @@ When running in a Jupyter notebook or JupyterLab, the sortingview widget will al notebook! To set up the backend, you need to authenticate to `kachery-cloud` using your GitHub account by running -the following command (you will be prompted a link): +the following command (you will be prompted with a link): .. code-block:: bash @@ -196,13 +197,13 @@ The functions have the following additional arguments: .. code-block:: python # sortingview backend - w_ts = sw.plot_traces(recording=recording, backend="ipywidgets") - w_ss = sw.plot_sorting_summary(recording=recording, backend="sortingview") + w_ts = sw.plot_traces(recording=recording, backend="sortingview") + w_ss = sw.plot_sorting_summary(waveform_extractor=we, curation=True, backend="sortingview") **Output:** -* `Timeseries link `_ +* `plot_traces link `_ .. image:: ../images/sv_timeseries.png @@ -259,11 +260,22 @@ The :code:`ephyviewer` backend is currently only available for the :py:func:`~sp Available plotting functions ---------------------------- +* :py:func:`~spikeinterface.widgets.plot_agreement_matrix` (backends: :code:`matplotlib`) * :py:func:`~spikeinterface.widgets.plot_all_amplitudes_distributions` (backends: :code:`matplotlib`) * :py:func:`~spikeinterface.widgets.plot_amplitudes` (backends: :code:`matplotlib`, :code:`ipywidgets`, :code:`sortingview`) * :py:func:`~spikeinterface.widgets.plot_autocorrelograms` (backends: :code:`matplotlib`, :code:`sortingview`) +* :py:func:`~spikeinterface.widgets.plot_confusion_matrix` (backends: :code:`matplotlib`) +* :py:func:`~spikeinterface.widgets.plot_comparison_collision_by_similarity` (backends: :code:`matplotlib`) * :py:func:`~spikeinterface.widgets.plot_crosscorrelograms` (backends: :code:`matplotlib`, :code:`sortingview`) +* :py:func:`~spikeinterface.widgets.plot_isi_distribution` (backends: :code:`matplotlib`) +* :py:func:`~spikeinterface.widgets.plot_motion` (backends: :code:`matplotlib`) +* :py:func:`~spikeinterface.widgets.plot_multicomparison_agreement` (backends: :code:`matplotlib`) +* :py:func:`~spikeinterface.widgets.plot_multicomparison_agreement_by_sorter` (backends: :code:`matplotlib`) +* :py:func:`~spikeinterface.widgets.plot_multicomparison_graph` (backends: :code:`matplotlib`) +* :py:func:`~spikeinterface.widgets.plot_peak_activity` (backends: :code:`matplotlib`) +* :py:func:`~spikeinterface.widgets.plot_probe_map` (backends: :code:`matplotlib`) * :py:func:`~spikeinterface.widgets.plot_quality_metrics` (backends: :code:`matplotlib`, :code:`ipywidgets`, :code:`sortingview`) +* :py:func:`~spikeinterface.widgets.plot_rasters` (backends: :code:`matplotlib`) * :py:func:`~spikeinterface.widgets.plot_sorting_summary` (backends: :code:`sortingview`) * :py:func:`~spikeinterface.widgets.plot_spike_locations` (backends: :code:`matplotlib`, :code:`ipywidgets`) * :py:func:`~spikeinterface.widgets.plot_spikes_on_traces` (backends: :code:`matplotlib`, :code:`ipywidgets`) @@ -272,30 +284,14 @@ Available 
plotting functions * :py:func:`~spikeinterface.widgets.plot_traces` (backends: :code:`matplotlib`, :code:`ipywidgets`, :code:`sortingview`, :code:`ephyviewer`) * :py:func:`~spikeinterface.widgets.plot_unit_depths` (backends: :code:`matplotlib`) * :py:func:`~spikeinterface.widgets.plot_unit_locations` (backends: :code:`matplotlib`, :code:`ipywidgets`, :code:`sortingview`) +* :py:func:`~spikeinterface.widgets.plot_unit_presence` (backends: :code:`matplotlib`) +* :py:func:`~spikeinterface.widgets.plot_unit_probe_map` (backends: :code:`matplotlib`) * :py:func:`~spikeinterface.widgets.plot_unit_summary` (backends: :code:`matplotlib`) * :py:func:`~spikeinterface.widgets.plot_unit_templates` (backends: :code:`matplotlib`, :code:`ipywidgets`, :code:`sortingview`) * :py:func:`~spikeinterface.widgets.plot_unit_waveforms_density_map` (backends: :code:`matplotlib`) * :py:func:`~spikeinterface.widgets.plot_unit_waveforms` (backends: :code:`matplotlib`, :code:`ipywidgets`) - - -Legacy plotting functions -^^^^^^^^^^^^^^^^^^^^^^^^^ - -These functions are still part of the package, but they are directly implemented in :code:`matplotlib` without the -more recend backend mechanism: - -* :py:func:`~spikeinterface.widgets.plot_rasters` -* :py:func:`~spikeinterface.widgets.plot_probe_map` -* :py:func:`~spikeinterface.widgets.plot_isi_distribution` -* :py:func:`~spikeinterface.widgets.plot_drift_over_time` -* :py:func:`~spikeinterface.widgets.plot_peak_activity_map` -* :py:func:`~spikeinterface.widgets.plot_principal_component` -* :py:func:`~spikeinterface.widgets.plot_unit_probe_map` -* :py:func:`~spikeinterface.widgets.plot_confusion_matrix` -* :py:func:`~spikeinterface.widgets.plot_agreement_matrix` -* :py:func:`~spikeinterface.widgets.plot_multicomp_graph` -* :py:func:`~spikeinterface.widgets.plot_multicomp_agreement` -* :py:func:`~spikeinterface.widgets.plot_multicomp_agreement_by_sorter` -* :py:func:`~spikeinterface.widgets.plot_comparison_collision_pair_by_pair` -* :py:func:`~spikeinterface.widgets.plot_comparison_collision_by_similarity` -* :py:func:`~spikeinterface.widgets.plot_sorting_performance` +* :py:func:`~spikeinterface.widgets.plot_study_run_times` (backends: :code:`matplotlib`) +* :py:func:`~spikeinterface.widgets.plot_study_unit_counts` (backends: :code:`matplotlib`) +* :py:func:`~spikeinterface.widgets.plot_study_agreement_matrix` (backends: :code:`matplotlib`) +* :py:func:`~spikeinterface.widgets.plot_study_summary` (backends: :code:`matplotlib`) +* :py:func:`~spikeinterface.widgets.plot_study_comparison_collision_by_similarity` (backends: :code:`matplotlib`) diff --git a/doc/viewers.rst b/doc/viewers.rst index 55463146ce..c3ada31b55 100644 --- a/doc/viewers.rst +++ b/doc/viewers.rst @@ -16,7 +16,7 @@ spikeinterface.widgets The easiest way to visualize :code:`spikeinterface` objects is to use the :code:`widgets` module for plotting. You can find an extensive description in the module documentation :ref:`modulewidgets` -and many examples in this tutorial :ref:`sphx_glr_modules_gallery_widgets`. +and many examples in the :code:`Widgets tutorials` section of the :code:`Modules example gallery`. spikeinterface-gui ------------------ @@ -24,7 +24,7 @@ spikeinterface-gui `spikeinterface-gui `_ is a local desktop application which is built on top of :code:`spikeinterface`. -It is the easiest and fastest way to inspect interactively a spike sorting output. +It is the easiest and fastest way to interactively inspect a spike sorting output. It's easy to install and ready to use! 
 Authors: Samuel Garcia
 
@@ -44,6 +44,7 @@ phy
 ---
 
 `phy `_ is the de-facto standard tool for manual curation of a sorting output.
-The current drawback of :code:`phy` is that the dataset (including filtered signals and **all** waveforms of spikes) has to be copied in a separate folder and this is very time consuming process and occupies a lot of disk space.
+The current drawback of :code:`phy` is that the dataset (including filtered signals and **all** waveforms of spikes) has to be copied
+in a separate folder, which is a very time-consuming process and occupies a lot of disk space.
 
 Author : Cyrill Rossant
diff --git a/examples/modules_gallery/core/plot_1_recording_extractor.py b/examples/modules_gallery/core/plot_1_recording_extractor.py
index f20bf6497d..f5d3ee1db2 100644
--- a/examples/modules_gallery/core/plot_1_recording_extractor.py
+++ b/examples/modules_gallery/core/plot_1_recording_extractor.py
@@ -26,7 +26,7 @@
 num_channels = 7
 sampling_frequency = 30000.  # in Hz
-durations = [10., 15.]  #  in s for 2 segments
+durations = [10., 15.]  # in s for 2 segments
 num_segments = 2
 num_timepoints = [int(sampling_frequency * d) for d in durations]
 
@@ -38,7 +38,7 @@
 traces1 = np.random.normal(0, 10, (num_timepoints[1], num_channels))
 
 ##############################################################################
-# And instantiate a :py:class:`~spikeinterface.core.NumpyRecording`. Each object has a pretty print to
+# And instantiate a :py:class:`~spikeinterface.core.NumpyRecording`. Each object has a pretty print to
 # summarize its content:
 
 recording = se.NumpyRecording(traces_list=[traces0, traces1], sampling_frequency=sampling_frequency)
 
@@ -47,24 +47,28 @@
 ##############################################################################
 # We can now print properties that the :code:`RecordingExtractor` retrieves from the underlying recording.
 
-print('Num. channels = {}'.format(len(recording.get_channel_ids())))
-print('Sampling frequency = {} Hz'.format(recording.get_sampling_frequency()))
-print('Num. timepoints seg0= {}'.format(recording.get_num_segments()))
-print('Num. timepoints seg0= {}'.format(recording.get_num_frames(segment_index=0)))
-print('Num. timepoints seg1= {}'.format(recording.get_num_frames(segment_index=1)))
+print(f'Number of channels = {len(recording.get_channel_ids())}')
+print(f'Sampling frequency = {recording.get_sampling_frequency()} Hz')
+print(f'Number of segments = {recording.get_num_segments()}')
+print(f'Number of timepoints in seg0 = {recording.get_num_frames(segment_index=0)}')
+print(f'Number of timepoints in seg1 = {recording.get_num_frames(segment_index=1)}')
 
 ##############################################################################
-# The geometry of the Probe is handle with the :probeinterface:`ProbeInterface <>`.
-# Let's generate a linear probe:
+# The geometry of the Probe is handled with the :probeinterface:`ProbeInterface <>` library.
+# Let's generate a linear probe by specifying our number of electrodes/contacts (num_elec) +# the distance between the contacts (ypitch), their shape (contact_shapes) and their size +# (contact_shape_params): from probeinterface import generate_linear_probe from probeinterface.plotting import plot_probe probe = generate_linear_probe(num_elec=7, ypitch=20, contact_shapes='circle', contact_shape_params={'radius': 6}) -# the probe has to be wired to the recording +# the probe has to be wired to the recording device (i.e., which electrode corresponds to an entry in the data +# matrix) probe.set_device_channel_indices(np.arange(7)) +# then we need to actually set the probe to the recording object recording = recording.set_probe(probe) plot_probe(probe) @@ -76,14 +80,14 @@ ############################################################################## # We can read the written recording back with the proper extractor. -# Note that this new recording is now "on disk" and not "in memory" as the Numpy recording. -# This means that the loading is "lazy" and the data are not loaded in memory. +# Note that this new recording is now "on disk" and not "in memory" as the Numpy recording was. +# This means that the loading is "lazy" and the data are not loaded into memory. recording2 = se.BinaryRecordingExtractor(file_paths=file_paths, sampling_frequency=sampling_frequency, num_channels=num_channels, dtype=traces0.dtype) print(recording2) ############################################################################## -#  Loading traces in memory is done on demand: +# Loading traces in memory is done on demand: # entire segment 0 traces0 = recording2.get_traces(segment_index=0) @@ -93,8 +97,8 @@ print(traces1_short.shape) ############################################################################## -# A recording internally has :code:`channel_ids`: these are a vector that can have -# dtype int or str: +# Internally, a recording has :code:`channel_ids`: that are a vector that can have a +# dtype of :code:`int` or :code:`str`: print('chan_ids (dtype=int):', recording.get_channel_ids()) @@ -111,7 +115,7 @@ print(traces.shape) ############################################################################## -# You can also get a a recording with a subset of channel (a channel slice): +# You can also get a recording with a subset of channels (i.e. a channel slice): recording4 = recording3.channel_slice(channel_ids=['a', 'c', 'e']) print(recording4) @@ -136,7 +140,7 @@ ############################################################################### # A recording can be "dumped" (exported) to: # * a dict -#  * a json file +# * a json file # * a pickle file # # The "dump" operation is lazy, i.e., the traces are not exported. @@ -164,7 +168,7 @@ # # If you wish to also store the traces in a compact way you need to use the # :code:`save()` function. This operation is very useful to save traces obtained -# after long computation (e.g. filtering): +# after long computations (e.g. filtering or referencing): recording2.save(folder='./my_recording') diff --git a/examples/modules_gallery/core/plot_3_handle_probe_info.py b/examples/modules_gallery/core/plot_3_handle_probe_info.py index 1900b59433..d134b29ec5 100644 --- a/examples/modules_gallery/core/plot_3_handle_probe_info.py +++ b/examples/modules_gallery/core/plot_3_handle_probe_info.py @@ -4,9 +4,9 @@ In order to properly spike sort, you may need to load information related to the probe you are using. 
-SpikeInterface internally uses :probeinterface:`ProbeInterface <>` to handle probe or probe groups for recordings. +SpikeInterface internally uses :probeinterface:`ProbeInterface <>` to handle probes or probe groups for recordings. -Depending on the dataset, the :py:class:`~probeinterface.Probe` object can be already included or needs to be set +Depending on the dataset, the :py:class:`~probeinterface.Probe` object may already be included or might need to be set manually. Here's how! @@ -22,7 +22,7 @@ ############################################################################### # This generator already contain a probe object that you can retrieve -# directly an plot: +# directly and plot: probe = recording.get_probe() print(probe) @@ -32,13 +32,13 @@ plot_probe(probe) ############################################################################### -# You can also overwrite the probe. In that case you need to manually make +# You can also overwrite the probe. In this case you need to manually make # the wiring (e.g. virtually connect each electrode to the recording device). # Let's use a probe from Cambridge Neurotech with 32 channels: from probeinterface import get_probe -other_probe = get_probe('cambridgeneurotech', 'ASSY-37-E-1') +other_probe = get_probe(manufacturer='cambridgeneurotech', probe_name='ASSY-37-E-1') print(other_probe) other_probe.set_device_channel_indices(np.arange(32)) @@ -47,8 +47,8 @@ ############################################################################### # Now let's check what we have loaded. The `group_mode='by_shank'` automatically -# set the 'group' property depending on the shank id. -# We can use this information to split the recording in two sub recordings: +# sets the 'group' property depending on the shank id. +# We can use this information to split the recording into two sub-recordings: print(recording_2_shanks) print(recording_2_shanks.get_property('group')) diff --git a/examples/modules_gallery/core/plot_6_handle_times.py b/examples/modules_gallery/core/plot_6_handle_times.py index 81c67fc31d..4ca116e3c6 100644 --- a/examples/modules_gallery/core/plot_6_handle_times.py +++ b/examples/modules_gallery/core/plot_6_handle_times.py @@ -10,16 +10,16 @@ from spikeinterface.extractors import toy_example ############################################################################## -# First let's generate toy example with a single segment: +# First let's generate a toy example with a single segment: rec, sort = toy_example(num_segments=1) ############################################################################## -# Generally, the time information would be automaticall loaded when reading a +# Generally, the time information would be automatically loaded when reading a # recording. # However, sometimes we might need to add a time vector externally. 
-# For example, now let's create a time vector by getting the default times and +# For example, let's create a time vector by getting the default times and # adding 5 s: default_times = rec.get_times() diff --git a/examples/modules_gallery/extractors/plot_1_read_various_formats.py b/examples/modules_gallery/extractors/plot_1_read_various_formats.py index ed0ba34396..df85946530 100644 --- a/examples/modules_gallery/extractors/plot_1_read_various_formats.py +++ b/examples/modules_gallery/extractors/plot_1_read_various_formats.py @@ -2,10 +2,10 @@ Read various format into SpikeInterface ======================================= -SpikeInterface can read various format of "recording" (traces) and "sorting" (spike train) data. +SpikeInterface can read various formats of "recording" (traces) and "sorting" (spike train) data. Internally, to read different formats, SpikeInterface either uses: - * a wrapper to the `neo `_ rawio classes + * a wrapper to `neo `_ rawio classes * or a direct implementation Note that: @@ -18,14 +18,14 @@ import matplotlib.pyplot as plt -import spikeinterface as si +import spikeinterface.core as si import spikeinterface.extractors as se ############################################################################## # Let's download some datasets in different formats from the # `ephy_testing_data `_ repo: # -# * MEArec: an simulator format which is hdf5-based. It contains both a "recording" and a "sorting" in the same file. +# * MEArec: a simulator format which is hdf5-based. It contains both a "recording" and a "sorting" in the same file. # * Spike2: file from spike2 devices. It contains "recording" information only. @@ -36,14 +36,14 @@ print(mearec_folder_path) ############################################################################## -# Now that we have downloaded the files let's load them into SI. +# Now that we have downloaded the files, let's load them into SI. # # The :py:func:`~spikeinterface.extractors.read_spike2` function returns one object, # a :py:class:`~spikeinterface.core.BaseRecording`. # # Note that internally this file contains 2 data streams ('0' and '1'), so we need to specify which one we # want to retrieve ('0' in our case). -# the stream information can be retrieve using :py:func:`~spikeinterface.extractors.get_neo_streams` function +# the stream information can be retrieved by using the :py:func:`~spikeinterface.extractors.get_neo_streams` function. 
stream_names, stream_ids = se.get_neo_streams('spike2', spike2_file_path) print(stream_names) @@ -76,13 +76,13 @@ print(type(sorting)) ############################################################################## -#  The :py:func:`~spikeinterface.extractors.read_mearec` function is equivalent to: +# The :py:func:`~spikeinterface.extractors.read_mearec` function is equivalent to: recording = se.MEArecRecordingExtractor(mearec_folder_path) sorting = se.MEArecSortingExtractor(mearec_folder_path) ############################################################################## -# SI objects (:py:class:`~spikeinterface.core.BaseRecording` and :py:class:`~spikeinterface.core.BaseSorting`) object +# SI objects (:py:class:`~spikeinterface.core.BaseRecording` and :py:class:`~spikeinterface.core.BaseSorting`) # can be plotted quickly with the :py:mod:`spikeinterface.widgets` submodule: import spikeinterface.widgets as sw diff --git a/examples/modules_gallery/extractors/plot_2_working_with_unscaled_traces.py b/examples/modules_gallery/extractors/plot_2_working_with_unscaled_traces.py index 5dd8a39582..69a7e889e4 100644 --- a/examples/modules_gallery/extractors/plot_2_working_with_unscaled_traces.py +++ b/examples/modules_gallery/extractors/plot_2_working_with_unscaled_traces.py @@ -3,7 +3,7 @@ ============================ Some file formats store data in convenient types that require offsetting and scaling in order to convert the -traces to uV. This example shows how to work with unscaled and scaled traces int :py:mod:`spikeinterface.extractors` +traces to uV. This example shows how to work with unscaled and scaled traces in the :py:mod:`spikeinterface.extractors` module. ''' @@ -39,21 +39,21 @@ offset = -2 ** (10 - 1) * gain ############################################################################### -# We are now ready to set gains and offsets to our extractor. We also have to set the :code:`has_unscaled` field to +# We are now ready to set gains and offsets for our extractor. We also have to set the :code:`has_unscaled` field to # :code:`True`: recording.set_channel_gains(gain) recording.set_channel_offsets(offset) ############################################################################### -#  Internally this gains and offsets are handle with properties +# Internally the gain and offset are handled with properties # So the gain could be "by channel". print(recording.get_property('gain_to_uV')) print(recording.get_property('offset_to_uV')) ############################################################################### -# With gains and offset information, we can retrieve traces both in their unscaled (raw) type, and in their scaled +# With gain and offset information, we can retrieve traces both in their unscaled (raw) type, and in their scaled # type: traces_unscaled = recording.get_traces(return_scaled=False) diff --git a/examples/modules_gallery/qualitymetrics/plot_3_quality_mertics.py b/examples/modules_gallery/qualitymetrics/plot_3_quality_mertics.py index 7b6aae3e30..7b2fa565b5 100644 --- a/examples/modules_gallery/qualitymetrics/plot_3_quality_mertics.py +++ b/examples/modules_gallery/qualitymetrics/plot_3_quality_mertics.py @@ -2,12 +2,12 @@ Quality Metrics Tutorial ======================== -After spike sorting, you might want to validate the goodness of the sorted units. This can be done using the +After spike sorting, you might want to validate the 'goodness' of the sorted units. 
This can be done using the :code:`qualitymetrics` submodule, which computes several quality metrics of the sorted units. """ -import spikeinterface as si +import spikeinterface.core as si import spikeinterface.extractors as se from spikeinterface.postprocessing import compute_principal_components from spikeinterface.qualitymetrics import (compute_snrs, compute_firing_rates, @@ -29,10 +29,15 @@ # For convenience, metrics are computed on the :code:`WaveformExtractor` object, # because it contains a reference to the "Recording" and the "Sorting" objects: -folder = 'waveforms_mearec' -we = si.extract_waveforms(recording, sorting, folder, sparse=False, - ms_before=1, ms_after=2., max_spikes_per_unit=500, - n_jobs=1, chunk_durations='1s') +we = si.extract_waveforms(recording=recording, + sorting=sorting, + folder='waveforms_mearec', + sparse=False, + ms_before=1, + ms_after=2., + max_spikes_per_unit=500, + n_jobs=1, + chunk_durations='1s') print(we) ############################################################################## @@ -51,7 +56,7 @@ # Some metrics are based on the principal component scores, so they require a # :code:`WaveformsPrincipalComponent` object as input: -pc = compute_principal_components(we, load_if_exists=True, +pc = compute_principal_components(waveform_extractor=we, load_if_exists=True, n_components=3, mode='by_channel_local') print(pc) diff --git a/examples/modules_gallery/qualitymetrics/plot_4_curation.py b/examples/modules_gallery/qualitymetrics/plot_4_curation.py index edd7a85ce5..2568452de3 100644 --- a/examples/modules_gallery/qualitymetrics/plot_4_curation.py +++ b/examples/modules_gallery/qualitymetrics/plot_4_curation.py @@ -3,13 +3,13 @@ ================== After spike sorting and computing quality metrics, you can automatically curate the spike sorting output using the -quality metrics. +quality metrics that you have calculated. 
""" ############################################################################# # Import the modules and/or functions necessary from spikeinterface -import spikeinterface as si +import spikeinterface.core as si import spikeinterface.extractors as se from spikeinterface.postprocessing import compute_principal_components @@ -29,11 +29,16 @@ ############################################################################## # First, we extract waveforms (to be saved in the folder 'wfs_mearec') and -# compute their PC scores: - -we = si.extract_waveforms(recording, sorting, folder='wfs_mearec', - ms_before=1, ms_after=2., max_spikes_per_unit=500, - n_jobs=1, chunk_size=30000) +# compute their PC (principal component) scores: + +we = si.extract_waveforms(recording=recording, + sorting=sorting, + folder='wfs_mearec', + ms_before=1, + ms_after=2., + max_spikes_per_unit=500, + n_jobs=1, + chunk_size=30000) print(we) pc = compute_principal_components(we, load_if_exists=True, n_components=3, mode='by_channel_local') @@ -42,7 +47,7 @@ ############################################################################## # Then we compute some quality metrics: -metrics = compute_quality_metrics(we, metric_names=['snr', 'isi_violation', 'nearest_neighbor']) +metrics = compute_quality_metrics(waveform_extractor=we, metric_names=['snr', 'isi_violation', 'nearest_neighbor']) print(metrics) ############################################################################## diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 23d13c0afe..7269960dc1 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -38,7 +38,7 @@ class GroundTruthStudy: In this case, the result dataframes will have `MultiIndex` to handle the different levels. A ground-truth dataset is made of a `Recording` and a `Sorting` object. For example, it can be a simulated dataset with MEArec or internally generated (see - :py:fun:`~spikeinterface.core.generate.generate_ground_truth_recording()`). + :py:func:`~spikeinterface.core.generate.generate_ground_truth_recording()`). This GroundTruthStudy have been refactor in version 0.100 to be more flexible than previous versions. Note that the underlying folder structure is not backward compatible! diff --git a/src/spikeinterface/postprocessing/spike_locations.py b/src/spikeinterface/postprocessing/spike_locations.py index 72d44bf348..dfa940b979 100644 --- a/src/spikeinterface/postprocessing/spike_locations.py +++ b/src/spikeinterface/postprocessing/spike_locations.py @@ -150,14 +150,15 @@ def compute_spike_locations( spike_retriver_kwargs: dict A dictionary to control the behavior for getting the maximum channel for each spike This dictionary contains: - * channel_from_template: bool, default: True - For each spike is the maximum channel computed from template or re estimated at every spikes - channel_from_template = True is old behavior but less acurate - channel_from_template = False is slower but more accurate - * radius_um: float, default: 50 - In case channel_from_template=False, this is the radius to get the true peak - * peak_sign, default: "neg" - In case channel_from_template=False, this is the peak sign. 
+
+            * channel_from_template: bool, default: True
+                Whether the maximum channel is computed from the template or re-estimated for every spike.
+                channel_from_template = True is the old behavior but less accurate
+                channel_from_template = False is slower but more accurate
+            * radius_um: float, default: 50
+                In case channel_from_template=False, this is the radius to get the true peak
+            * peak_sign, default: "neg"
+                In case channel_from_template=False, this is the peak sign.
     method : "center_of_mass" | "monopolar_triangulation" | "grid_convolution", default: "center_of_mass"
         The localization method to use
     method_kwargs : dict, default: dict()
diff --git a/src/spikeinterface/widgets/gtstudy.py b/src/spikeinterface/widgets/gtstudy.py
index 5e934f9702..91e2c382b4 100644
--- a/src/spikeinterface/widgets/gtstudy.py
+++ b/src/spikeinterface/widgets/gtstudy.py
@@ -297,11 +297,12 @@ def plot_matplotlib(self, data_plot, **backend_kwargs):
 
 class StudySummary(BaseWidget):
     """
     Plot a summary of a ground truth study.
-    Internally does:
-        plot_study_run_times
-        plot_study_unit_counts
-        plot_study_performances
-        plot_study_agreement_matrix
+    Internally this plotting function runs:
+
+    * plot_study_run_times
+    * plot_study_unit_counts
+    * plot_study_performances
+    * plot_study_agreement_matrix
 
     Parameters
     ----------
diff --git a/src/spikeinterface/widgets/multicomparison.py b/src/spikeinterface/widgets/multicomparison.py
index fb34156fef..0917869f8c 100644
--- a/src/spikeinterface/widgets/multicomparison.py
+++ b/src/spikeinterface/widgets/multicomparison.py
@@ -206,10 +206,6 @@ class MultiCompAgreementBySorterWidget(BaseWidget):
     show_legend: bool
         Show the legend in the last axes
 
-    Returns
-    -------
-    W: MultiCompGraphWidget
-        The output widget
     """
 
     def __init__(