diff --git a/docs/source/submodules/stimulus_data.rst b/docs/source/submodules/stimulus_data.rst
index 85fe944..83ee7a7 100644
--- a/docs/source/submodules/stimulus_data.rst
+++ b/docs/source/submodules/stimulus_data.rst
@@ -92,9 +92,9 @@ trial groups can be set with a utility function. :code:`set_trial_groups`, which
 a key of the Intan channel and a value of the desired trial groups as an :code:`np.array`. Since the channel names are
 not always easy to know they can be returned using :code:`get_stimulus_channels`. Finally stimulus' should be named with
 :code:`set_stimulus_name`.
 
-.. code:-block:: python
+.. code-block:: python
 
-    stim_dict = stim.get_stimluus_channels()
+    stim_dict = stim.get_stimulus_channels()
     stim.set_trial_groups(trial_dictionary=trial_dictionary) # dict as explained above
     sitm.set_stimulus_names(stim_names = name_dictionary) # same keys with str values
 
@@ -108,6 +108,17 @@ in the case of optogenetic trains rather than looking at :code:`events`, :code:`
 :code:`stim_time_secs` (length of the train) must be given.
 
+Deleting accidental events
+--------------------------
+
+In the case of an accidental event (for example, turning on a TTL signal for just a moment by accident) one can use the
+:code:`delete_events()` function. Either a single :code:`del_index` or a list of indices to delete is given. This means
+that if event "25" was a mistaken signal one could do the following:
+
+.. code-block:: python
+
+    stim.delete_events(del_index=24, digital=True, channel_name="DIGITAL-IN-01")  # python is 0 based so the 25th event is index 24
+
 Saving files for easy loading
 -----------------------------
 
@@ -148,5 +159,5 @@ And remember to :code:`save_events`.
     stim = StimulusData(file_path='home/myawesomedata')
     stim.run_all(stim_length_seconds=10, stim_name=['ana1'])
     stim.set_trial_groups(trial_dictionary=my_dictionary)
-    stim.set_stimulus_names(stim_names=my_name_dictionary)
+    stim.set_stimulus_name(stim_names=my_name_dictionary)
     stim.save_events()
\ No newline at end of file
diff --git a/src/spikeanalysis/spike_data.py b/src/spikeanalysis/spike_data.py
index 8039192..deda556 100644
--- a/src/spikeanalysis/spike_data.py
+++ b/src/spikeanalysis/spike_data.py
@@ -95,6 +95,8 @@ def run_all(
         idthres: float,
         rpv: float,
         sil: float,
+        amp_std: float,
+        amp_cutoff: float,
         recurated: bool = False,
         set_caching: bool = True,
         depth: float = 0,
@@ -113,6 +115,11 @@ def run_all(
             the refractory period violation fraction allowed: 0.02 would be 2% violations
         sil: float
            the silhouette score allowed -1 (bad) to 1 (perfect)
+        amp_std: float
+            the number of standard deviations around the mean amplitude used to count how many spikes fall outside that range
+        amp_cutoff: float
+            the cutoff value to use for amplitude-based qc, e.g. 0.98 means 98% of spike amplitudes must fall within
+            amp_std standard deviations of the mean amplitude
         recurated: bool
             If more Phy curation has occurred such that any cached or currently loaded values need to be overwritten (True),
             Default False
@@ -140,10 +147,12 @@ def run_all(
         except AttributeError:
             self.generate_pcs()
             self.generate_qcmetrics()
-        self.qc_preprocessing(idthres=idthres, rpv=rpv, sil=sil, recurated=recurated)
-        self.set_qc()
+        self.get_waveforms()
         self.get_waveform_values(depth=depth)
+        self.get_amplitudes(std=amp_std)
+        self.qc_preprocessing(idthres=idthres, rpv=rpv, sil=sil, amp_cutoff=amp_cutoff, recurated=recurated)
+        self.set_qc()
 
         self._return_to_dir(current_dir)
 
     def denoise_data(self):
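
For reference, a minimal sketch of the amplitude-aware QC flow that the reordered :code:`run_all` body now performs step by step: waveforms and amplitudes are computed before :code:`qc_preprocessing` so that :code:`amp_cutoff` can be applied. Only the method names and keyword arguments are taken from the diff above; the import path, file path, and threshold values are illustrative assumptions.

.. code-block:: python

    from spikeanalysis.spike_data import SpikeData  # import path assumed

    spikes = SpikeData(file_path="path/to/phy/output")  # placeholder path

    # same order as the new run_all body
    spikes.get_waveforms()
    spikes.get_waveform_values(depth=0)
    spikes.get_amplitudes(std=2)  # amp_std: count spikes outside +/- 2 std of the mean amplitude
    spikes.qc_preprocessing(idthres=10, rpv=0.02, sil=0.45, amp_cutoff=0.98, recurated=False)  # example thresholds
    spikes.set_qc()
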
a/src/spikeanalysis/spike_plotter.py
+++ b/src/spikeanalysis/spike_plotter.py
@@ -654,9 +654,7 @@ def _get_event_lengths(self) -> dict:
         stim_dict = self.data._get_key_for_stim()
         for key, value in stim_dict.items():
-            stim_lengths[key] = np.mean(
-                np.array(self.data.events[value]["lengths"]) / self.data._sampling_rate
-            )
+            stim_lengths[key] = np.mean(np.array(self.data.events[value]["lengths"]) / self.data._sampling_rate)
 
         return stim_lengths
 
     def _get_event_lengths_all(self) -> dict:
@@ -672,7 +670,7 @@ def _get_event_lengths_all(self) -> dict:
         for key, value in stim_dict.items():
             stim_lengths[key] = np.array(self.data.events[value]["lengths"]) / self.data._sampling_rate
-            
+
         return stim_lengths
 
     def _get_trial_groups(self) -> dict:
diff --git a/src/spikeanalysis/stimulus_data.py b/src/spikeanalysis/stimulus_data.py
index 88853db..1fbb10c 100644
--- a/src/spikeanalysis/stimulus_data.py
+++ b/src/spikeanalysis/stimulus_data.py
@@ -1,3 +1,4 @@
+from __future__ import annotations
 import json
 import os
 from .utils import NumpyEncoder
@@ -302,23 +303,18 @@ def get_final_digital_data(self):
         except TypeError:
             raise Exception("There is no digital data present")
 
-        fid = open(self._filename, "rb")
-        intan_header = self._read_header(fid)
-        fid.close()
-        dig_in_channels = intan_header["board_dig_in_channels"]
-        self.intan = intan_header
-
-        values = np.zeros((len(dig_in_channels), len(self._raw_digital_data)))
-        for value in range(len(dig_in_channels)):
-            values[value, :] = np.not_equal(  # this operation comes from the python Intan code
+        values = np.zeros((16, len(self._raw_digital_data)))  # 16 digital-in for intan
+        for value in range(1, 17):
+            values[value - 1, :] = np.not_equal(  # this operation comes from the python Intan code
                 np.bitwise_and(
                     self._raw_digital_data,
-                    (1 << dig_in_channels[value]["native_order"]),
+                    (1 << value),
                 ),
                 0,
             )
-        self.digital_data = values
-        self.dig_in_channels = dig_in_channels
+
+        self.dig_in_channels = np.nonzero(np.sum(values, axis=1))[0] + 1
+        self.digital_data = values[np.nonzero(np.sum(values, axis=1))[0]]
 
     def generate_digital_events(self):
         assert self.digital_data is not None, "There is no final digital data, run `get_final_digital_data` first"
@@ -326,17 +322,19 @@
         self.digital_events = {}
         self.digital_channels = []
         for idx, row in enumerate(tqdm(self.digital_data)):
-            self.digital_events[self.dig_in_channels[idx]["native_channel_name"]] = {}
+            if idx < 10:
+                title = "DIGITAL-IN-0"
+            else:
+                title = "DIGITAL-IN-"
+            self.digital_events[title + str(self.dig_in_channels[idx])] = {}
             events, lengths = self._calculate_events(self.digital_data[idx])
-            self.digital_events[self.dig_in_channels[idx]["native_channel_name"]]["events"] = events
-            self.digital_events[self.dig_in_channels[idx]["native_channel_name"]]["lengths"] = lengths
-            self.digital_events[self.dig_in_channels[idx]["native_channel_name"]]["trial_groups"] = np.ones(
-                (len(events))
-            )
+            self.digital_events[title + str(self.dig_in_channels[idx])]["events"] = events
+            self.digital_events[title + str(self.dig_in_channels[idx])]["lengths"] = lengths
+            self.digital_events[title + str(self.dig_in_channels[idx])]["trial_groups"] = np.ones((len(events)))
             if len(events) == 0:
-                del self.digital_events[self.dig_in_channels[idx]["native_channel_name"]]
+                del self.digital_events[title + str(self.dig_in_channels[idx])]
             else:
-                self.digital_channels.append(self.dig_in_channels[idx]["native_channel_name"])
+                self.digital_channels.append(title + str(self.dig_in_channels[idx]))
 
     def get_stimulus_channels(self) -> dict:
         """
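
To make the header-free decoding concrete, here is a self-contained sketch of the same bit-mask logic on a synthetic :code:`raw_digital_data` array. The sample values are made up; the bit positions and the pruning of silent channels mirror the loop added in :code:`get_final_digital_data` above.

.. code-block:: python

    import numpy as np

    raw_digital_data = np.array([0, 2, 2, 0, 6, 6, 0])  # synthetic digital words (made up)

    values = np.zeros((16, len(raw_digital_data)))  # one row per possible DIGITAL-IN channel
    for value in range(1, 17):
        values[value - 1, :] = np.not_equal(np.bitwise_and(raw_digital_data, 1 << value), 0)

    # keep only channels that were ever high, numbering them 1-16
    active = np.nonzero(np.sum(values, axis=1))[0]
    dig_in_channels = active + 1
    digital_data = values[active]
    print(dig_in_channels)  # prints [1 2] for the synthetic samples above
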
@@ -387,6 +385,11 @@ def set_trial_groups(self, trial_dictionary: dict):
         """
         try:
             for channel in trial_dictionary.keys():
+                trial_groups = self.digital_events[channel]["trial_groups"]
+                assert len(trial_groups) == len(
+                    trial_dictionary[channel]
+                ), f"for {channel} you have {len(trial_groups)} events, \
+                    but you put in {len(trial_dictionary[channel])} trial groups"
                 self.digital_events[channel]["trial_groups"] = trial_dictionary[channel]
         except KeyError:
             raise Exception(
@@ -468,6 +471,49 @@ def save_events(self):
         except AttributeError:
             print("No analog events to save")
 
+    def delete_events(
+        self,
+        del_index: int | list[int],
+        digital: bool = True,
+        channel_name: Optional[str] = None,
+        channel_index: Optional[int] = None,
+    ):
+        """
+        Function for deleting a spurious event, e.g., an accidental trigger event
+
+        Parameters
+        ----------
+        digital: bool, default: True
+            Whether to delete digital or analog events
+        channel_name: str | None, default: None
+            the channel name of a digital signal to clean up, must be given if digital=True
+        channel_index: int | None, default: None
+            the channel index of an analog event to delete, must be given if digital=False
+        del_index: int | list[int]
+            the index (or list of indices) of the event(s) to be deleted
+        """
+
+        del_index = np.array(del_index)
+        if digital:
+            assert channel_name is not None, "must give channel_name if removing a digital event"
+            data = self.digital_events
+            key = channel_name
+        else:
+            assert channel_index is not None, "must give channel_index if removing an analog event"
+            data = self.dig_analog_events
+            key = str(channel_index)
+
+        data_to_clean = data[key]
+
+        for keys in ["events", "lengths", "trial_groups"]:
+            assert np.max(del_index) < len(data_to_clean[keys])
+            data_to_clean[keys] = np.delete(data_to_clean[keys], del_index)
+
+        if digital:
+            self.digital_events[key] = data_to_clean
+        else:
+            self.dig_analog_events[key] = data_to_clean
+
     def _intan_neo_read_no_dig(self, reader: neo.rawio.IntanRawIO, time_slice=(None, None)) -> np.array:
         """
         Utility function that hacks the Neo memmap structure to be able to read
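
A short usage sketch for the new method follows; the :code:`stim` object is an already-processed :code:`StimulusData` instance and the channel names and indices below are placeholders, while the keyword names match the signature above.

.. code-block:: python

    # drop a single accidental digital event (0-based index)
    stim.delete_events(del_index=24, digital=True, channel_name="DIGITAL-IN-01")

    # drop several spurious events at once by passing a list of indices
    stim.delete_events(del_index=[0, 3, 7], digital=True, channel_name="DIGITAL-IN-02")

    # for analog (dig_analog) events, address the channel by index instead of name
    stim.delete_events(del_index=2, digital=False, channel_index=0)
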
@@ -538,217 +584,3 @@ def _calculate_events(self, array: np.array) -> tuple[np.array, np.array]:
         lengths = offset - onset
 
         return onset, lengths
-
-    def _read_header(self, fid) -> dict:
-        """
-        The official Intan reader header function with edits. This function is needed
-        until Neo allows access to digital information in their IntanRawIO class
-
-        Parameters
-        ----------
-        fid : file id
-            id of the rhd file
-
-
-        Returns
-        -------
-        header: dict
-            A dictionary of all the channel information of the rhd file
-
-        """
-        # Michael Gibson 23 APRIL 2015
-        # Adrian Foy Sep 2018
-        import struct
-
-        (magic_number,) = struct.unpack("= 1) or (version["major"] > 1):
-            (header["num_temp_sensor_channels"],) = struct.unpack("= 3)) or (version["major"] > 1):
-            (header["eval_board_mode"],) = struct.unpack(" 1:
-            header["reference_channel"] = self._read_qstring(fid)
-        header["num_samples_per_data_block"] = 128
-
-        # Place frequency-related information in data structure. (Note: much of this structure is set above)
-        freq["amplifier_sample_rate"] = header["sample_rate"]
-        freq["aux_input_sample_rate"] = header["sample_rate"] / 4
-        freq["supply_voltage_sample_rate"] = header["sample_rate"] / header["num_samples_per_data_block"]
-        freq["board_adc_sample_rate"] = header["sample_rate"]
-        freq["board_dig_in_sample_rate"] = header["sample_rate"]
-
-        header["frequency_parameters"] = freq
-
-        # Create structure arrays for each type of data channel.
-        header["spike_triggers"] = []
-        header["amplifier_channels"] = []
-        header["aux_input_channels"] = []
-        header["supply_voltage_channels"] = []
-        header["board_adc_channels"] = []
-        header["board_dig_in_channels"] = []
-        header["board_dig_out_channels"] = []
-
-        # Read signal summary from data file header.
-        (number_of_signal_groups,) = struct.unpack(" 0) and (signal_group_enabled > 0):
-                for signal_channel in range(0, signal_group_num_channels):
-                    new_channel = {
-                        "port_name": signal_group_name,
-                        "port_prefix": signal_group_prefix,
-                        "port_number": signal_group,
-                    }
-                    new_channel["native_channel_name"] = self._read_qstring(fid)
-                    new_channel["custom_channel_name"] = self._read_qstring(fid)
-                    (
-                        new_channel["native_order"],
-                        new_channel["custom_order"],
-                        signal_type,
-                        channel_enabled,
-                        new_channel["chip_channel"],
-                        new_channel["board_stream"],
-                    ) = struct.unpack(" str:
-        """
-        Utility function from Intan for reading qstrings. Edits from ZM
-
-        Parameters
-        ----------
-        fid : TYPE
-            DESCRIPTION.
-
-        Returns
-        -------
-        str
-            q string returned as string
-
-        """
-        # Michael Gibson 23APRIL2015
-        # ZM some changes
-        """
-        Parameters
-        ----------
-        fid: file id
-        Returns
-        -------
-        a string from the file"""
-        import struct, os
-
-        (length,) = struct.unpack(" (os.fstat(fid.fileno()).st_size - fid.tell() + 1):
-            print(length)
-            raise Exception("Length too long.")
-
-        # convert length from bytes to 16-bit Unicode words
-        length = int(length / 2)
-
-        data = []
-        for _ in range(0, length):
-            (c,) = struct.unpack("
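
Finally, an end-to-end sketch tying the changes together: digital events are decoded without the removed Intan header reader, spurious events can be dropped, and the trial groups must now match the remaining event count before saving. The import path, file path, channel name, and trial-group values are illustrative assumptions; the method names and keyword arguments come from the diff and the documentation above.

.. code-block:: python

    import numpy as np
    from spikeanalysis.stimulus_data import StimulusData  # import path assumed

    stim = StimulusData(file_path="home/myawesomedata")  # path reused from the docs example
    stim.run_all(stim_length_seconds=10, stim_name=["ana1"])

    stim_dict = stim.get_stimulus_channels()  # e.g. {"DIGITAL-IN-01": ...}
    stim.delete_events(del_index=24, digital=True, channel_name="DIGITAL-IN-01")

    # after deleting, the trial-group array must match the remaining number of events,
    # otherwise the new assertion in set_trial_groups fires
    n_events = len(stim.digital_events["DIGITAL-IN-01"]["events"])
    stim.set_trial_groups(trial_dictionary={"DIGITAL-IN-01": np.ones(n_events)})
    stim.set_stimulus_name(stim_names={"DIGITAL-IN-01": "laser"})
    stim.save_events()
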