Merge pull request #3061 from alejoe91/prepare-0.100.8
Prepare 0.100.8 release
alejoe91 authored Jun 24, 2024
2 parents 9a31eac + 9e8a709 commit 1197aad
Showing 39 changed files with 155 additions and 139 deletions.
2 changes: 1 addition & 1 deletion doc/releases/0.100.7.rst
@@ -3,7 +3,7 @@
SpikeInterface 0.100.7 release notes
------------------------------------

- 7th June 2024
+ 7th June 2024

Minor release with bug fixes

16 changes: 16 additions & 0 deletions doc/releases/0.100.8.rst
@@ -0,0 +1,16 @@
+ .. _release0.100.8:
+
+ SpikeInterface 0.100.8 release notes
+ ------------------------------------
+
+ 24th June 2024
+
+ Minor release with bug fixes
+
+ * Remove separate default job_kwarg n_jobs for sorters (#2712)
+ * Fix math error in sd_ratio (#2964)
+ * Add `whiteningRange` as a Kilosort 2/2.5/3 parameter (#2997)
+ * Make sure we check `is_filtered()` rather than the bound method during BaseSorter runs (#3037)
+ * NumPy 2.0: fix most egregious deprecated behavior and cap version (#3032, #3056)
+ * Add support for kilosort>=4.0.12 (#3055)
+ * Check start_frame/end_frame in BaseRecording.get_traces() (#3059)
7 changes: 7 additions & 0 deletions doc/whatisnew.rst
@@ -8,6 +8,7 @@ Release notes
.. toctree::
:maxdepth: 1

+ releases/0.100.8.rst
releases/0.100.7.rst
releases/0.100.6.rst
releases/0.100.5.rst
@@ -41,6 +42,12 @@ Release notes
releases/0.9.1.rst


+ Version 0.100.8
+ ===============
+
+ * Minor release with bug fixes
+

Version 0.100.7
===============

4 changes: 2 additions & 2 deletions pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "spikeinterface"
version = "0.100.7"
version = "0.100.8"
authors = [
{ name="Alessio Buccino", email="[email protected]" },
{ name="Samuel Garcia", email="[email protected]" },
@@ -20,7 +20,7 @@ classifiers = [


dependencies = [
"numpy",
"numpy>=1.20, <2.0", # 1.20 np.ptp, 1.26 might be necessary for avoiding pickling errors when numpy >2.0
"threadpoolctl>=3.0.0",
"tqdm",
"zarr>=2.16,<2.18",
3 changes: 3 additions & 0 deletions src/spikeinterface/core/baserecording.py
@@ -285,6 +285,9 @@ def get_traces(
segment_index = self._check_segment_index(segment_index)
channel_indices = self.ids_to_indices(channel_ids, prefer_slice=True)
rs = self._recording_segments[segment_index]
+ start_frame = int(start_frame) if start_frame is not None else 0
+ num_samples = rs.get_num_samples()
+ end_frame = int(min(end_frame, num_samples)) if end_frame is not None else num_samples
traces = rs.get_traces(start_frame=start_frame, end_frame=end_frame, channel_indices=channel_indices)
if order is not None:
assert order in ["C", "F"]
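The three added lines centralize the `None` handling and bounds clamping that several recording-segment classes below previously re-implemented. A minimal sketch of the resulting behavior, assuming the public `generate_recording` helper (numbers are illustrative):

```python
from spikeinterface.core import generate_recording

# One 1-second segment at 30 kHz -> 30_000 samples (illustrative numbers)
recording = generate_recording(num_channels=4, durations=[1.0], sampling_frequency=30000.0)

# start_frame/end_frame of None default to the full segment, and an
# end_frame past the end is now clamped to num_samples instead of
# propagating to the segment implementations
traces = recording.get_traces(start_frame=29_000, end_frame=60_000)
assert traces.shape == (1_000, 4)
```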
7 changes: 6 additions & 1 deletion src/spikeinterface/core/core_tools.py
@@ -93,7 +93,12 @@ def default(self, obj):
if isinstance(obj, np.generic):
return obj.item()

- if np.issctype(obj): # Cast numpy datatypes to their names
+ # Standard numpy dtypes like np.dtype("int32") are transformed this way
+ if isinstance(obj, np.dtype):
return np.dtype(obj).name

+ # This will transform to a string canonical representation of the dtype (e.g. np.int32 -> 'int32')
+ if isinstance(obj, type) and issubclass(obj, np.generic):
+ return np.dtype(obj).name

if isinstance(obj, np.ndarray):
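`np.issctype` was removed in NumPy 2.0, so the encoder now tests explicitly for `np.dtype` instances and for scalar-type subclasses of `np.generic`. A quick sketch of what the two branches serialize to, assuming `SIJsonEncoder` is importable as in the jsonification test further down:

```python
import json
import numpy as np
from spikeinterface.core.core_tools import SIJsonEncoder

# A dtype instance and a scalar-type alias both serialize to the dtype's name
json.dumps(np.dtype("int32"), cls=SIJsonEncoder)  # -> '"int32"'
json.dumps(np.float32, cls=SIJsonEncoder)         # -> '"float32"'
```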
4 changes: 0 additions & 4 deletions src/spikeinterface/core/frameslicerecording.py
@@ -85,10 +85,6 @@ def get_num_samples(self):
return self.end_frame - self.start_frame

def get_traces(self, start_frame, end_frame, channel_indices):
- if start_frame is None:
- start_frame = 0
- if end_frame is None:
- end_frame = self.get_num_samples()
parent_start = self.start_frame + start_frame
parent_end = self.start_frame + end_frame
traces = self._parent_recording_segment.get_traces(
8 changes: 2 additions & 6 deletions src/spikeinterface/core/generate.py
@@ -1092,9 +1092,6 @@ def get_traces(
end_frame: Union[int, None] = None,
channel_indices: Union[List, None] = None,
) -> np.ndarray:
- start_frame = 0 if start_frame is None else max(start_frame, 0)
- end_frame = self.num_samples if end_frame is None else min(end_frame, self.num_samples)

start_frame_within_block = start_frame % self.noise_block_size
end_frame_within_block = end_frame % self.noise_block_size
num_samples = end_frame - start_frame
@@ -1650,9 +1647,6 @@ def get_traces(
end_frame: Union[int, None] = None,
channel_indices: Union[List, None] = None,
) -> np.ndarray:
- start_frame = 0 if start_frame is None else start_frame
- end_frame = self.num_samples if end_frame is None else end_frame

if channel_indices is None:
n_channels = self.templates.shape[2]
elif isinstance(channel_indices, slice):
@@ -1688,6 +1682,8 @@
end_traces = start_traces + template.shape[0]
if start_traces >= end_frame - start_frame or end_traces <= 0:
continue
+ start_traces = int(start_traces)
+ end_traces = int(end_traces)

start_template = 0
end_template = template.shape[0]
13 changes: 8 additions & 5 deletions src/spikeinterface/core/globals.py
@@ -42,9 +42,9 @@ def set_global_tmp_folder(folder):
temp_folder_set = True


- def is_set_global_tmp_folder():
+ def is_set_global_tmp_folder() -> bool:
"""
- Check is the global path temporary folder have been manually set.
+ Check if the global path temporary folder has been manually set.
"""
global temp_folder_set
return temp_folder_set
@@ -88,9 +88,9 @@ def set_global_dataset_folder(folder):
dataset_folder_set = True


- def is_set_global_dataset_folder():
+ def is_set_global_dataset_folder() -> bool:
"""
- Check is the global path dataset folder have been manually set.
+ Check if the global path dataset folder has been manually set.
"""
global dataset_folder_set
return dataset_folder_set
@@ -138,7 +138,10 @@ def reset_global_job_kwargs():
global_job_kwargs = dict(n_jobs=1, chunk_duration="1s", progress_bar=True)


- def is_set_global_job_kwargs_set():
+ def is_set_global_job_kwargs_set() -> bool:
+ """
+ Check if the global job kwargs have been manually set.
+ """
global global_job_kwargs_set
return global_job_kwargs_set

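These boolean getters feed the new `fix_job_kwargs` warning below: once job kwargs have been set globally, the "`n_jobs` is not set" warning is suppressed. A minimal usage sketch, assuming the top-level exports used by the tests further down:

```python
from spikeinterface import get_global_job_kwargs, set_global_job_kwargs

# Explicitly opting in to parallel processing marks the globals as set,
# which silences the new warning in fix_job_kwargs
set_global_job_kwargs(n_jobs=4, chunk_duration="1s")
assert get_global_job_kwargs()["n_jobs"] == 4
```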
32 changes: 22 additions & 10 deletions src/spikeinterface/core/job_tools.py
@@ -10,7 +10,6 @@
import warnings

import sys
- import contextlib
from tqdm.auto import tqdm

from concurrent.futures import ProcessPoolExecutor
@@ -28,8 +27,9 @@
Total memory usage (e.g. "500M", "2G")
- chunk_duration : str or float or None
Chunk duration in s if float or with units if str (e.g. "1s", "500ms")
- * n_jobs: int
- Number of jobs to use. With -1 the number of jobs is the same as number of cores
+ * n_jobs: int | float
+ Number of jobs to use. With -1 the number of jobs is the same as number of cores.
+ Using a float between 0 and 1 will use that fraction of the total cores.
* progress_bar: bool
If True, a progress bar is printed
* mp_context: "fork" | "spawn" | None, default: None
@@ -60,38 +60,50 @@


def fix_job_kwargs(runtime_job_kwargs):
- from .globals import get_global_job_kwargs
+ from .globals import get_global_job_kwargs, is_set_global_job_kwargs_set

job_kwargs = get_global_job_kwargs()

for k in runtime_job_kwargs:
assert k in job_keys, (
f"{k} is not a valid job keyword argument. " f"Available keyword arguments are: {list(job_keys)}"
)

# remove mutually exclusive from global job kwargs
for k, v in runtime_job_kwargs.items():
if k in _mutually_exclusive and v is not None:
for key_to_remove in _mutually_exclusive:
if key_to_remove in job_kwargs:
job_kwargs.pop(key_to_remove)

# remove None
runtime_job_kwargs_exclude_none = runtime_job_kwargs.copy()
for job_key, job_value in runtime_job_kwargs.items():
if job_value is None:
del runtime_job_kwargs_exclude_none[job_key]
job_kwargs.update(runtime_job_kwargs_exclude_none)

# if n_jobs is -1, set to os.cpu_count() (n_jobs is always in global job_kwargs)
n_jobs = job_kwargs["n_jobs"]
- assert isinstance(n_jobs, (float, np.integer, int))
- if isinstance(n_jobs, float):
+ assert isinstance(n_jobs, (float, np.integer, int)) and n_jobs != 0, "n_jobs must be a non-zero int or float"
+ # for a fraction we do fraction of total cores
+ if isinstance(n_jobs, float) and 0 < n_jobs <= 1:
n_jobs = int(n_jobs * os.cpu_count())
+ # for negative numbers we count down from total cores (with -1 being all)
elif n_jobs < 0:
- n_jobs = os.cpu_count() + 1 + n_jobs
+ n_jobs = int(os.cpu_count() + 1 + n_jobs)
+ # otherwise we just take the value given
+ else:
+ n_jobs = int(n_jobs)

job_kwargs["n_jobs"] = max(n_jobs, 1)

if "n_jobs" not in runtime_job_kwargs and job_kwargs["n_jobs"] == 1 and not is_set_global_job_kwargs_set():
warnings.warn(
"`n_jobs` is not set so parallel processing is disabled! "
"To speed up computations, it is recommended to set n_jobs either "
"globally (with the `spikeinterface.set_global_job_kwargs()` function) or "
"locally (with the `n_jobs` argument). Use `spikeinterface.set_global_job_kwargs?` "
"for more information about job_kwargs."
)

return job_kwargs


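A short sketch of how the reworked `n_jobs` resolution behaves, assuming a machine where `os.cpu_count()` returns 8 and the global job kwargs are still at their defaults:

```python
from spikeinterface.core.job_tools import fix_job_kwargs

# Assuming os.cpu_count() == 8 and default global job kwargs
fix_job_kwargs({"n_jobs": 0.5})["n_jobs"]  # -> 4, a fraction of the total cores
fix_job_kwargs({"n_jobs": -1})["n_jobs"]   # -> 8, all cores
fix_job_kwargs({"n_jobs": -2})["n_jobs"]   # -> 7, counting down from the total
fix_job_kwargs({"n_jobs": 4})["n_jobs"]    # -> 4, taken as given
fix_job_kwargs({"n_jobs": 0})              # AssertionError: n_jobs must be a non-zero int or float
```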
5 changes: 0 additions & 5 deletions src/spikeinterface/core/segmentutils.py
@@ -163,11 +163,6 @@ def get_num_samples(self):
return self.total_length

def get_traces(self, start_frame, end_frame, channel_indices):
- if start_frame is None:
- start_frame = 0
- if end_frame is None:
- end_frame = self.get_num_samples()

# # Ensures that we won't request invalid segment indices
if (start_frame >= self.get_num_samples()) or (end_frame <= start_frame):
# Return (0 * num_channels) array of correct dtype
@@ -38,7 +38,7 @@ def test_BinaryRecordingExtractor():

def test_round_trip(tmp_path):
num_channels = 10
- num_samples = 50
+ num_samples = 500
traces_list = [np.ones(shape=(num_samples, num_channels), dtype="int32")]
sampling_frequency = 30_000.0
recording = NumpyRecording(traces_list=traces_list, sampling_frequency=sampling_frequency)
12 changes: 12 additions & 0 deletions src/spikeinterface/core/tests/test_globals.py
@@ -1,4 +1,5 @@
import pytest
+ import warnings
from pathlib import Path

from spikeinterface import (
@@ -39,11 +40,22 @@ def test_global_tmp_folder():
def test_global_job_kwargs():
job_kwargs = dict(n_jobs=4, chunk_duration="1s", progress_bar=True, mp_context=None, max_threads_per_process=1)
global_job_kwargs = get_global_job_kwargs()

+ # test warning when not setting n_jobs and calling fix_job_kwargs
+ with pytest.warns(UserWarning):
+ job_kwargs_split = fix_job_kwargs({})

assert global_job_kwargs == dict(
n_jobs=1, chunk_duration="1s", progress_bar=True, mp_context=None, max_threads_per_process=1
)
set_global_job_kwargs(**job_kwargs)
assert get_global_job_kwargs() == job_kwargs

+ # after setting global job kwargs, fix_job_kwargs should not raise a warning
+ with warnings.catch_warnings():
+ warnings.simplefilter("error")
+ job_kwargs_split = fix_job_kwargs({})

# test updating only one field
partial_job_kwargs = dict(n_jobs=2)
set_global_job_kwargs(**partial_job_kwargs)
6 changes: 3 additions & 3 deletions src/spikeinterface/core/tests/test_job_tools.py
@@ -180,10 +180,10 @@ def test_fix_job_kwargs():
else:
assert fixed_job_kwargs["n_jobs"] == 1

- # test minimum n_jobs
- job_kwargs = dict(n_jobs=0, progress_bar=False, chunk_duration="1s")
+ # test float value > 1 is cast to correct int
+ job_kwargs = dict(n_jobs=float(os.cpu_count()), progress_bar=False, chunk_duration="1s")
fixed_job_kwargs = fix_job_kwargs(job_kwargs)
- assert fixed_job_kwargs["n_jobs"] == 1
+ assert fixed_job_kwargs["n_jobs"] == os.cpu_count()

# test wrong keys
with pytest.raises(AssertionError):
1 change: 0 additions & 1 deletion src/spikeinterface/core/tests/test_jsonification.py
@@ -122,7 +122,6 @@ def test_numpy_dtype_alises_encoding():
# People tend to use this as a dtype instead of the proper classes
json.dumps(np.int32, cls=SIJsonEncoder)
json.dumps(np.float32, cls=SIJsonEncoder)
- json.dumps(np.bool_, cls=SIJsonEncoder) # Note that np.bool was deperecated in numpy 1.20.0


def test_recording_encoding(numpy_generated_recording):
4 changes: 0 additions & 4 deletions src/spikeinterface/extractors/cbin_ibl.py
@@ -130,10 +130,6 @@ def get_num_samples(self):
return self._cbuffer.shape[0]

def get_traces(self, start_frame, end_frame, channel_indices):
- if start_frame is None:
- start_frame = 0
- if end_frame is None:
- end_frame = self.get_num_samples()
if channel_indices is None:
channel_indices = slice(None)

5 changes: 0 additions & 5 deletions src/spikeinterface/extractors/nwbextractors.py
@@ -873,11 +873,6 @@ def get_num_samples(self):
return self._num_samples

def get_traces(self, start_frame, end_frame, channel_indices):
- if start_frame is None:
- start_frame = 0
- if end_frame is None:
- end_frame = self.get_num_samples()

electrical_series_data = self.electrical_series_data
if electrical_series_data.ndim == 1:
traces = electrical_series_data[start_frame:end_frame][:, np.newaxis]
5 changes: 0 additions & 5 deletions src/spikeinterface/preprocessing/average_across_direction.py
@@ -116,11 +116,6 @@ def get_num_samples(self):
return self.parent_recording_segment.get_num_samples()

def get_traces(self, start_frame, end_frame, channel_indices):
- if start_frame is None:
- start_frame = 0
- if end_frame is None:
- end_frame = self.get_num_samples()

parent_traces = self.parent_recording_segment.get_traces(
start_frame=start_frame,
end_frame=end_frame,
7 changes: 0 additions & 7 deletions src/spikeinterface/preprocessing/decimate.py
@@ -123,13 +123,6 @@ def get_num_samples(self):
return int(np.ceil((parent_n_samp - self._decimation_offset) / self._decimation_factor))

def get_traces(self, start_frame, end_frame, channel_indices):
- if start_frame is None:
- start_frame = 0
- if end_frame is None:
- end_frame = self.get_num_samples()
- end_frame = min(end_frame, self.get_num_samples())
- start_frame = min(start_frame, self.get_num_samples())

# Account for offset and end when querying parent traces
parent_start_frame = self._decimation_offset + start_frame * self._decimation_factor
parent_end_frame = parent_start_frame + (end_frame - start_frame) * self._decimation_factor
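The parent-frame mapping kept in this hunk is easiest to see with concrete numbers (hypothetical values, not taken from the diff):

```python
# Hypothetical values: keep every 10th parent sample, starting at parent frame 3
decimation_factor, decimation_offset = 10, 3

# A request for decimated frames [2, 7) maps onto parent frames [23, 73)
start_frame, end_frame = 2, 7
parent_start_frame = decimation_offset + start_frame * decimation_factor               # 23
parent_end_frame = parent_start_frame + (end_frame - start_frame) * decimation_factor  # 73
```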
@@ -147,12 +147,6 @@ def get_traces(self, start_frame, end_frame, channel_indices):

n_frames = self.parent_recording_segment.get_num_samples()

- if start_frame == None:
- start_frame = 0
-
- if end_frame == None:
- end_frame = n_frames

# for frames that lack full training data (i.e. pre and post frames including omissions),
# just return uninterpolated
if start_frame < self.pre_frame + self.pre_post_omission:
5 changes: 0 additions & 5 deletions src/spikeinterface/preprocessing/directional_derivative.py
@@ -103,11 +103,6 @@ def __init__(
self.unique_pos_other_dims, self.column_inds = np.unique(geom_other_dims, axis=0, return_inverse=True)

def get_traces(self, start_frame, end_frame, channel_indices):
- if start_frame is None:
- start_frame = 0
- if end_frame is None:
- end_frame = self.get_num_samples()

parent_traces = self.parent_recording_segment.get_traces(
start_frame=start_frame,
end_frame=end_frame,
4 changes: 0 additions & 4 deletions src/spikeinterface/preprocessing/phase_shift.py
@@ -81,10 +81,6 @@ def __init__(self, parent_recording_segment, sample_shifts, margin, dtype, tmp_d
self.tmp_dtype = tmp_dtype

def get_traces(self, start_frame, end_frame, channel_indices):
- if start_frame is None:
- start_frame = 0
- if end_frame is None:
- end_frame = self.get_num_samples()
if channel_indices is None:
channel_indices = slice(None)
