Skip to content

Commit

Permalink
Merge branch 'other-stim' of https://github.com/zm711/spikeanalysis into other-stim
Browse files Browse the repository at this point in the history
  • Loading branch information
zm711 committed Nov 22, 2023
2 parents 166e20b + fe7c1b0 commit bb35a8b
Show file tree
Hide file tree
Showing 7 changed files with 37 additions and 20 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/python-package-conda.yml
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ jobs:
- name: Set up Python 3.10
uses: actions/setup-python@v4
with:
python-version: '3.10'
python-version: '3.11'
- uses: conda-incubator/[email protected]
with:
miniforge-variant: Mambaforge
Expand Down
4 changes: 2 additions & 2 deletions docs/source/submodules/stimulus_data.rst
Original file line number Diff line number Diff line change
Expand Up @@ -117,7 +117,7 @@ do the following:

.. code-block:: python
stim.delete_events(del_index=24, digital=True, digital_channel="DIGITAL-IN-01") #python is 0 based so 25th event is 24 index
stim.delete_events(del_index=24, digital=True, channel_name="DIGITAL-IN-01") #python is 0 based so 25th event is 24th index
Saving files for easy loading
-----------------------------
Expand Down Expand Up @@ -159,4 +159,4 @@ And remember to :code:`save_events`.
stim.run_all(stim_length_seconds=10, stim_name=['ana1'])
stim.set_trial_groups(trial_dictionary=my_dictionary)
stim.set_stimulus_name(stim_names=my_name_dictionary)
stim.save_events()
stim.save_events()
2 changes: 1 addition & 1 deletion environment.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ channels:
- conda-forge
- defaults
dependencies:
- python
- python<3.12
- numpy
- scipy
- matplotlib
Expand Down
4 changes: 2 additions & 2 deletions environment_dev.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ channels:
- conda-forge
- defaults
dependencies:
- python
- python<3.12
- numpy
- scipy
- matplotlib
Expand All @@ -17,4 +17,4 @@ dependencies:
- pytest-cov
- pip
- pip:
- git+https://github.com/NeuralEnsemble/python-neo.git
- git+https://github.com/NeuralEnsemble/python-neo.git
9 changes: 8 additions & 1 deletion src/spikeanalysis/intrinsic_plotter.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,15 +133,22 @@ def plot_waveforms(self, sp: SpikeData):
A SpikeData object which has raw waveform values loaded"""

waveforms = sp.waveforms

sp.reload_data()
if len(sp._cids) != np.shape(waveforms)[0]: # if not same need to run set_qc
sp.set_qc()
if len(sp._cids) != np.shape(waveforms)[0]: # still not same need to index waveforms
waveforms = waveforms[sp._qc_threshold, ...]

try:
noise = sp.noise
except AttributeError:
noise = np.array([])

mean_waveforms = np.nanmean(waveforms, axis=1)

for cluster in range(np.shape(waveforms)[0]):
if self._cids[cluster] in noise:
pass
max_val = np.argwhere(mean_waveforms[cluster] == np.min(mean_waveforms[cluster]))[0]
max_channel = max_val[0]

Expand Down
2 changes: 1 addition & 1 deletion src/spikeanalysis/spike_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -187,7 +187,7 @@ def denoise_data(self):

# if len(cids) > len(self._cids):
# cids = self._cids
# self._cids = self._cids[np.isin(cids, noise_clusters, invert=True)]
self._cids = self._cids[np.isin(self._cids, noise_clusters, invert=True)]

self._return_to_dir(current_dir)

Expand Down
34 changes: 22 additions & 12 deletions src/spikeanalysis/spike_plotter.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ def set_analysis(self, analysis: SpikeAnalysis):
def plot_zscores(
self,
figsize: Optional[tuple] = (24, 10),
sorting_index: Optional[int] = None,
sorting_index: Optional[int] | list[int] = None,
z_bar: Optional[list[int]] = None,
indices: bool = False,
show_stim: bool = True,
Expand All @@ -95,7 +95,7 @@ def plot_zscores(
----------
figsize : Optional[tuple], optional
Matplotlib figsize tuple. For multiple trial groups bigger is better. The default is (24, 10).
sorting_index : Optional[int], optional
sorting_index : Optional[int] | list[int], optional
The trial group to sort all values on. The default is None (which uses the largest trial group).
z_bar: list[int]
If given a list with min z score for the cbar at index 0 and the max at index 1. Overrides cbar generation
Expand All @@ -110,7 +110,9 @@ def plot_zscores(
if indices is True, the function will return the cluster ids as displayed in the z bar graph
"""
reset = False
if self.cmap is None:
reset = True
self.cmap = "vlag"

sorted_cluster_ids = self._plot_scores(
Expand All @@ -121,16 +123,16 @@ def plot_zscores(
indices=indices,
show_stim=show_stim,
)

self.cmap = None
if reset:
self.cmap = None

if indices:
return sorted_cluster_ids

def plot_raw_firing(
self,
figsize: Optional[tuple] = (24, 10),
sorting_index: Optional[int] = None,
sorting_index: Optional[int] | list[int] = None,
bar: Optional[list[int]] = None,
indices: bool = False,
show_stim: bool = True,
Expand All @@ -146,7 +148,7 @@ def plot_raw_firing(
----------
figsize : Optional[tuple], optional
Matplotlib figsize tuple. For multiple trial groups bigger is better. The default is (24, 10).
sorting_index : Optional[int], optional
sorting_index : Optional[int] | list[int], optional
The trial group to sort all values on. The default is None (which uses the largest trial group).
bar: list[int]
If given a list with min firing rate for the cbar at index 0 and the max at index 1. Overrides cbar generation
Expand All @@ -161,14 +163,17 @@ def plot_raw_firing(
if indices is True, the function will return the cluster ids as displayed in the z bar graph
"""
reset = False
if self.cmap is None:
reset = True
self.cmap = "viridis"

sorted_cluster_ids = self._plot_scores(
data="raw-data", figsize=figsize, sorting_index=sorting_index, bar=bar, indices=indices, show_stim=show_stim
)

self.cmap = None
if reset:
self.cmap = None

if indices:
return sorted_cluster_ids
Expand All @@ -177,7 +182,7 @@ def _plot_scores(
self,
data: str = "zscore",
figsize: Optional[tuple] = (24, 10),
sorting_index: Optional[int] = None,
sorting_index: Optional[int] | list[int] = None,
bar: Optional[list[int]] = None,
indices: bool = False,
show_stim: bool = True,
Expand Down Expand Up @@ -207,7 +212,7 @@ def _plot_scores(
if indices is True, the function will return the cluster ids as displayed in the z bar graph
"""

if data == "zscore":
z_scores = self.data.z_scores
elif data == "raw-data":
Expand All @@ -230,7 +235,7 @@ def _plot_scores(

stim_lengths = self._get_event_lengths()
sorted_cluster_ids = {}
for stimulus in z_scores.keys():
for stim_idx, stimulus in enumerate(z_scores.keys()):
if len(np.shape(z_scores)) < 3:
sub_zscores = np.expand_dims(z_scores[stimulus], axis=1)
sub_zscores = z_scores[stimulus]
Expand All @@ -250,14 +255,19 @@ def _plot_scores(
bins = bins[np.logical_and(bins >= z_window[0], bins <= z_window[1])]

if sorting_index is None:
sorting_index = np.shape(sub_zscores)[1] - 1
current_sorting_index = np.shape(sub_zscores)[1] - 1
RESET_INDEX = True

else:
RESET_INDEX = False
assert isinstance(sorting_index, (list,int)), "sorting_index must be list or int"
if isinstance(sorting_index, list):
current_sorting_index = sorting_index[stim_idx]
else:
current_sorting_index = sorting_index
event_window = np.logical_and(bins >= 0, bins <= length)

z_score_sorting_index = np.argsort(-np.sum(sub_zscores[:, sorting_index, event_window], axis=1))
z_score_sorting_index = np.argsort(-np.sum(sub_zscores[:, current_sorting_index, event_window], axis=1))
sorted_cluster_ids[stimulus] = self.data.cluster_ids[z_score_sorting_index]
sorted_z_scores = sub_zscores[z_score_sorting_index, :, :]

Expand Down

0 comments on commit bb35a8b

Please sign in to comment.