diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 4d0e20139..c7ff9e7c3 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -2,8 +2,6 @@ name: Release on: push: - branches: - - iblrigv8 tags: - '[0-9]+.[0-9]+.[0-9]+' @@ -41,4 +39,4 @@ jobs: name: documentation - uses: softprops/action-gh-release@v2 with: - files: documentation/*.pdf \ No newline at end of file + files: iblrig_*_reference.pdf diff --git a/.gitignore b/.gitignore index f917b05c2..283cedd7f 100644 --- a/.gitignore +++ b/.gitignore @@ -46,3 +46,4 @@ devices/camera_recordings/*.layout .pdm-python /dist *~ +docs/source/api/* \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 20fc9c08a..b05ab29b5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,18 @@ Changelog ========= +8.24.0 +------ +* feature: validate values in `trials_table` using Pydantic +* feature: add auto-generated API reference to documentation +* changed: `show_trial_log()` now accepts a dict for including additional log items +* fix: `_ephysChoiceWorld` - values from the pre-generated sessions were not actually used +* fix: `_ephysChoiceWorld` - trial fixtures contained inverted values for `probability_left` +* fix: GUI - Subjects and Projects are not being cached +* add script for validating audio output of Bpod HiFi Module (in `scripts/` folder) + +------------------------------- + 8.23.1 ------ * feature: post hardware information to alyx diff --git a/docs/source/_templates/custom-class-template.rst b/docs/source/_templates/custom-class-template.rst new file mode 100644 index 000000000..999ac4127 --- /dev/null +++ b/docs/source/_templates/custom-class-template.rst @@ -0,0 +1,12 @@ +{{ fullname | escape | underline}} + +.. currentmodule:: {{ module }} + +.. inheritance-diagram:: {{ objname }} + :parts: 1 + +| + +.. autoclass:: {{ objname }} + :members: + :undoc-members: diff --git a/docs/source/_templates/custom-module-template.rst b/docs/source/_templates/custom-module-template.rst new file mode 100644 index 000000000..405e407a9 --- /dev/null +++ b/docs/source/_templates/custom-module-template.rst @@ -0,0 +1,84 @@ +{{ fullname | escape | underline}} + +.. automodule:: {{ fullname }} + {% block attributes %} + {%- if attributes %} + .. rubric:: {{ _('Module Attributes') }} + + .. autosummary:: + :nosignatures: + :toctree: + {% for item in attributes %} + {{ item }} + {%- endfor %} + {% endif %} + {%- endblock %} + + {%- block functions %} + {%- if functions %} + .. rubric:: {{ _('Functions') }} + + .. autosummary:: + :nosignatures: + :toctree: + {% for item in functions %} + {{ item }} + {%- endfor %} + {% endif %} + {%- endblock %} + + {%- block classes %} + {%- if classes %} + .. rubric:: {{ _('Classes') }} + + .. autosummary:: + :nosignatures: + :toctree: + :template: custom-class-template.rst + {% for item in classes %} + {{ item }} + {%- endfor %} + {% endif %} + {%- endblock %} + + {%- block exceptions %} + {%- if exceptions %} + .. rubric:: {{ _('Exceptions') }} + + .. autosummary:: + :nosignatures: + :toctree: + {% for item in exceptions %} + {{ item }} + {%- endfor %} + {% endif %} + {%- endblock %} + +{%- block modules %} +{%- if modules or name == 'iblrig_tasks' %} +.. rubric:: Modules + +.. 
autosummary:: + :nosignatures: + :toctree: + :template: custom-module-template.rst + :recursive: +{% for item in modules %} + {%- if item != 'test' %} {# EXCLUDE TESTS FROM API #} + {{ item }} + {% endif %} +{%- endfor %} +{%- if name == 'iblrig_tasks' %} + _iblrig_tasks_advancedChoiceWorld + _iblrig_tasks_biasedChoiceWorld + _iblrig_tasks_ephysChoiceWorld + _iblrig_tasks_habituationChoiceWorld + _iblrig_tasks_ImagingChoiceWorld + _iblrig_tasks_neuroModulatorChoiceWorld + _iblrig_tasks_passiveChoiceWorld + _iblrig_tasks_spontaneous + _iblrig_tasks_trainingChoiceWorld + _iblrig_tasks_trainingPhaseChoiceWorld +{% endif %} +{% endif %} +{%- endblock %} diff --git a/docs/source/api.rst b/docs/source/api.rst new file mode 100644 index 000000000..793234776 --- /dev/null +++ b/docs/source/api.rst @@ -0,0 +1,10 @@ +API Reference +============= + +.. autosummary:: + :toctree: api + :template: custom-module-template.rst + :recursive: + + iblrig + iblrig_tasks diff --git a/docs/source/conf.py b/docs/source/conf.py index 4b87d1fc4..e33cc90e6 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,7 +1,10 @@ +import os +import sys from datetime import date +sys.path.insert(0, os.path.abspath('../..')) from iblrig import __version__ -from iblrig.constants import BASE_PATH + project = 'iblrig' copyright = f'2018 – {date.today().year} International Brain Laboratory' @@ -12,21 +15,41 @@ # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration -extensions = ['sphinx_lesson', 'sphinx.ext.autosectionlabel', 'sphinx_simplepdf'] +templates_path = ['_templates'] +extensions = [ + 'sphinx_lesson', + 'sphinx.ext.autosectionlabel', + 'sphinx_simplepdf', + 'sphinx.ext.intersphinx', + 'sphinx.ext.napoleon', + 'sphinx.ext.autodoc', + 'sphinx.ext.autosummary', + 'sphinx.ext.inheritance_diagram', + 'sphinx.ext.viewcode', +] autosectionlabel_prefix_document = True source_suffix = ['.rst', '.md'] - -templates_path = ['_templates'] exclude_patterns = [] - +intersphinx_mapping = { + 'python': ('https://docs.python.org/3', None), + 'sphinx': ('https://www.sphinx-doc.org/en/master/', None), + 'matplotlib': ('https://matplotlib.org/stable/', None), + 'numpy': ('https://numpy.org/doc/stable/', None), + 'pandas': ('https://pandas.pydata.org/docs/', None), + 'scipy': ('https://docs.scipy.org/doc/scipy/', None), + 'one:': ('https://int-brain-lab.github.io/ONE/', None), + 'pydantic': ('https://docs.pydantic.dev/latest/', None), + 'iblenv': ('https://int-brain-lab.github.io/iblenv/', None), + 'pyserial': ('https://pyserial.readthedocs.io/en/latest/', None), + 'Sphinx': ('https://www.sphinx-doc.org/en/master/', None), +} # -- Options for HTML output ------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output html_theme = 'sphinx_rtd_theme' -html_static_path = ['_static'] - +# -- Options for PDF creation ------------------------------------------------ simplepdf_vars = { 'primary': '#004f8c', 'secondary': '#004f8c', @@ -39,3 +62,31 @@ 'docs_scope': 'external', 'cover_meta_data': 'International Brain Laboratory', } + +# -- Settings for automatic API generation ----------------------------------- +autodoc_mock_imports = ["PySpin"] +autodoc_class_signature = 'separated' # 'mixed', 'separated' +autodoc_member_order = 'groupwise' # 'alphabetical', 'groupwise', 'bysource' +autodoc_inherit_docstrings = False +autodoc_typehints = 'description' # 
'description', 'signature', 'none', 'both' +autodoc_typehints_description_target = 'all' # 'all', 'documented', 'documented_params' +autodoc_typehints_format = 'short' # 'fully-qualified', 'short' + +autosummary_generate = True +autosummary_imported_members = False + +napoleon_google_docstring = False +napoleon_numpy_docstring = True +napoleon_include_init_with_doc = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = False +napoleon_use_admonition_for_examples = True +napoleon_use_admonition_for_notes = True +napoleon_use_admonition_for_references = True +napoleon_use_ivar = False +napoleon_use_param = False +napoleon_use_rtype = True +napoleon_use_keyword = True +napoleon_preprocess_types = True +napoleon_type_aliases = None +napoleon_attr_annotations = False diff --git a/docs/source/faq.rst b/docs/source/faq.rst index 480b2cf16..8a98a9134 100644 --- a/docs/source/faq.rst +++ b/docs/source/faq.rst @@ -37,11 +37,13 @@ Sound Issues * Is ``hardware_settings.yaml`` set up correctly? Valid options for sound ``OUTPUT`` are: + - ``hifi``, - ``harp``, - ``xonar``, or - ``sysdefault``. Make sure that this value matches the actual soundcard used on your rig. + Note that ``sysdefault`` is only used in test scenarios and should not be used during actual experiments. Screen Issues diff --git a/docs/source/index.rst b/docs/source/index.rst index c043419f9..382286582 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -19,6 +19,7 @@ .. toctree:: :hidden: + api changelog .. toctree:: diff --git a/docs/source/reference_write_your_own_task.rst b/docs/source/reference_write_your_own_task.rst index 94871fc2b..b61a2a8ea 100644 --- a/docs/source/reference_write_your_own_task.rst +++ b/docs/source/reference_write_your_own_task.rst @@ -8,13 +8,13 @@ During the lifetime of the IBL project, we realized that multiple task variants This left us with the only option of developing a flexible task framework through hierarchical inheritance. -All tasks inherit from the ``iblrig.base_tasks.BaseSession`` class, which provides the following functionalities: +All tasks inherit from the :class:`iblrig.base_tasks.BaseSession` class, which provides the following functionalities: - read hardware parameters and rig parameters - optionally interfaces with the `Alyx experimental database `_ - creates the folder structure for the session - - writes the task and rig parameters, log, and :doc:`acquisition description files <../description_file>` + - writes the task and rig parameters, log, and :doc:`acquisition description files <../reference_description_file>` -Additionally the ``iblrig.base_tasks`` module provides "hardware mixins". Those are classes that provide hardware-specific functionalities, such as connecting to a Bpod or a rotary encoder. They are composed with the ``BaseSession`` class to create a task. +Additionally the :mod:`iblrig.base_tasks` module provides "hardware mixins". Those are classes that provide hardware-specific functionalities, such as connecting to a Bpod or a rotary encoder. They are composed with the :class:`~.iblrig.base_tasks.BaseSession` class to create a task. .. warning:: @@ -33,21 +33,21 @@ What Happens When Running an IBL Task? - Reading of task parameters. - Instantiation of hardware mixins. -2. The task initiates the ``run()`` method. Prior to execution, this +2. The task initiates the :meth:`~.iblrig.base_tasks.run` method. Prior to execution, this method: - Launches the hardware modules. - Establishes a session folder. 
- Saves the parameters to disk. -3. The experiment unfolds: the ``run()`` method triggers the ``_run()`` +3. The experiment unfolds: the :meth:`~.iblrig.base_tasks.run` method triggers the ``_run()`` method within the child class: - Typically, this involves a loop that generates a Bpod state machine for each trial and runs it. 4. Upon SIGINT or when the maximum trial count is reached, the - experiment concludes. The end of the ``run()`` method includes: + experiment concludes. The end of the :meth:`~.iblrig.base_tasks.run` method includes: - Saving the final parameter file. - Recording administered water and session performance on Alyx. diff --git a/iblrig/__init__.py b/iblrig/__init__.py index 45e724ad2..339e36d72 100644 --- a/iblrig/__init__.py +++ b/iblrig/__init__.py @@ -6,7 +6,7 @@ # 5) git tag the release in accordance to the version number below (after merge!) # >>> git tag 8.15.6 # >>> git push origin --tags -__version__ = '8.23.1' +__version__ = '8.24.0' from iblrig.version_management import get_detailed_version_string diff --git a/iblrig/base_choice_world.py b/iblrig/base_choice_world.py index fbc772a26..fa1570e41 100644 --- a/iblrig/base_choice_world.py +++ b/iblrig/base_choice_world.py @@ -1,7 +1,6 @@ """Extends the base_tasks modules by providing task logic around the Choice World protocol.""" import abc -import json import logging import math import random @@ -9,14 +8,18 @@ import time from pathlib import Path from string import ascii_letters +from typing import Annotated, Any import numpy as np import pandas as pd +from annotated_types import Interval, IsNan +from pydantic import NonNegativeFloat, NonNegativeInt import iblrig.base_tasks import iblrig.graphic from iblrig import choiceworld, misc from iblrig.hardware import SOFTCODE +from iblrig.pydantic_definitions import TrialDataModel from iblutil.io import jsonable from iblutil.util import Bunch from pybpodapi.com.messaging.trial import Trial @@ -72,6 +75,33 @@ # WHITE_NOISE_IDX: int = 3 +class ChoiceWorldTrialData(TrialDataModel): + """Pydantic Model for Trial Data.""" + + contrast: Annotated[float, Interval(ge=0.0, le=1.0)] + stim_probability_left: Annotated[float, Interval(ge=0.0, le=1.0)] + position: float + quiescent_period: NonNegativeFloat + reward_amount: NonNegativeFloat + reward_valve_time: NonNegativeFloat + stim_angle: Annotated[float, Interval(ge=-180.0, le=180.0)] + stim_freq: NonNegativeFloat + stim_gain: float + stim_phase: Annotated[float, Interval(ge=0.0, le=2 * math.pi)] + stim_reverse: bool + stim_sigma: float + trial_num: NonNegativeInt + pause_duration: NonNegativeFloat = 0.0 + + # The following variables are only used in ActiveChoiceWorld + # We keep them here with fixed default values for sake of compatibility + # + # TODO: Yes, this should probably be done differently. 
+ response_side: Annotated[int, Interval(ge=0, le=0)] = 0 + response_time: IsNan[float] = np.nan + trial_correct: Annotated[int, Interval(ge=0, le=0)] = False + + class ChoiceWorldSession( iblrig.base_tasks.BonsaiRecordingMixin, iblrig.base_tasks.BonsaiVisualStimulusMixin, @@ -84,6 +114,7 @@ class ChoiceWorldSession( ): # task_params = ChoiceWorldParams() base_parameters_file = Path(__file__).parent.joinpath('base_choice_world_params.yaml') + TrialDataModel = ChoiceWorldTrialData def __init__(self, *args, delay_secs=0, **kwargs): super().__init__(**kwargs) @@ -96,27 +127,7 @@ def __init__(self, *args, delay_secs=0, **kwargs): self.block_num = -1 self.block_trial_num = -1 # init the tables, there are 2 of them: a trials table and a ambient sensor data table - self.trials_table = pd.DataFrame( - { - 'contrast': np.zeros(NTRIALS_INIT) * np.NaN, - 'position': np.zeros(NTRIALS_INIT) * np.NaN, - 'quiescent_period': np.zeros(NTRIALS_INIT) * np.NaN, - 'response_side': np.zeros(NTRIALS_INIT, dtype=np.int8), - 'response_time': np.zeros(NTRIALS_INIT) * np.NaN, - 'reward_amount': np.zeros(NTRIALS_INIT) * np.NaN, - 'reward_valve_time': np.zeros(NTRIALS_INIT) * np.NaN, - 'stim_angle': np.zeros(NTRIALS_INIT) * np.NaN, - 'stim_freq': np.zeros(NTRIALS_INIT) * np.NaN, - 'stim_gain': np.zeros(NTRIALS_INIT) * np.NaN, - 'stim_phase': np.zeros(NTRIALS_INIT) * np.NaN, - 'stim_reverse': np.zeros(NTRIALS_INIT, dtype=bool), - 'stim_sigma': np.zeros(NTRIALS_INIT) * np.NaN, - 'trial_correct': np.zeros(NTRIALS_INIT, dtype=bool), - 'trial_num': np.zeros(NTRIALS_INIT, dtype=np.int16), - 'pause_duration': np.zeros(NTRIALS_INIT, dtype=float), - } - ) - + self.trials_table = self.TrialDataModel.preallocate_dataframe(NTRIALS_INIT) self.ambient_sensor_table = pd.DataFrame( { 'Temperature_C': np.zeros(NTRIALS_INIT) * np.NaN, @@ -200,6 +211,7 @@ def _run(self): self.trials_table.at[self.trial_num, 'pause_duration'] = time.time() - time_last_trial_end if not flag_stop.exists(): log.info('Resuming session') + # save trial and update log self.trial_completed(self.bpod.session.current_trial.export()) self.ambient_sensor_table.loc[i] = self.bpod.get_ambient_sensor_reading() @@ -298,6 +310,7 @@ def _instantiate_state_machine(self, *args, **kwargs): def get_state_machine_trial(self, i): # we define the trial number here for subclasses that may need it sma = self._instantiate_state_machine(trial_number=i) + if i == 0: # First trial exception start camera session_delay_start = self.task_params.get('SESSION_DELAY_START', 0) log.info('First trial initializing, will move to next trial only if:') @@ -323,6 +336,10 @@ def get_state_machine_trial(self, i): output_actions=[self.bpod.actions.stop_sound, ('BNC1', 255)], ) # stop all sounds + # Reset the rotary encoder by sending the following opcodes via the modules serial interface + # - 'Z' (ASCII 90): Set current rotary encoder position to zero + # - 'E' (ASCII 69): Enable all position thresholds (that may have been disabled by a threshold-crossing) + # cf. https://sanworks.github.io/Bpod_Wiki/serial-interfaces/rotary-encoder-module-serial-interface/ sma.add_state( state_name='reset_rotary_encoder', state_timer=0, @@ -330,7 +347,9 @@ def get_state_machine_trial(self, i): state_change_conditions={'Tup': 'quiescent_period'}, ) - sma.add_state( # '>back' | '>reset_timer' + # Quiescent Period. If the wheel is moved past one of the thresholds: Reset the rotary encoder and start over. + # Continue with the stimulation once the quiescent period has passed without triggering movement thresholds. 
+ sma.add_state( state_name='quiescent_period', state_timer=self.quiescent_period, output_actions=[], @@ -340,21 +359,26 @@ def get_state_machine_trial(self, i): self.movement_right: 'reset_rotary_encoder', }, ) - # show stimulus, move on to next state if a frame2ttl is detected, with a time-out of 0.1s + + # Show the visual stimulus. This is achieved by sending a time-stamped byte-message to Bonsai via the Rotary + # Encoder Module's ongoing USB-stream. Move to the next state once the Frame2TTL has been triggered, i.e., + # when the stimulus has been rendered on screen. Use the state-timer as a backup to prevent a stall. sma.add_state( state_name='stim_on', state_timer=0.1, output_actions=[self.bpod.actions.bonsai_show_stim], - state_change_conditions={'Tup': 'interactive_delay', 'BNC1High': 'interactive_delay', 'BNC1Low': 'interactive_delay'}, + state_change_conditions={'BNC1High': 'interactive_delay', 'BNC1Low': 'interactive_delay', 'Tup': 'interactive_delay'}, ) - # this is a feature that can eventually add a delay between visual and auditory cue + + # Defined delay between visual and auditory cue sma.add_state( state_name='interactive_delay', state_timer=self.task_params.INTERACTIVE_DELAY, output_actions=[], state_change_conditions={'Tup': 'play_tone'}, ) - # play tone, move on to next state if sound is detected, with a time-out of 0.1s + + # Play tone. Move to next state if sound is detected. Use the state-timer as a backup to prevent a stall. sma.add_state( state_name='play_tone', state_timer=0.1, @@ -362,13 +386,20 @@ def get_state_machine_trial(self, i): state_change_conditions={'Tup': 'reset2_rotary_encoder', 'BNC2High': 'reset2_rotary_encoder'}, ) + # Reset rotary encoder (see above). Move on after brief delay (to avoid a race conditions in the bonsai flow). sma.add_state( state_name='reset2_rotary_encoder', - state_timer=0.05, # the delay here is to avoid race conditions in the bonsai flow + state_timer=0.05, output_actions=[self.bpod.actions.rotary_encoder_reset], state_change_conditions={'Tup': 'closed_loop'}, ) + # Start the closed loop state in which the animal controls the position of the visual stimulus by means of the + # rotary encoder. The three possible outcomes are: + # 1) wheel has NOT been moved past a threshold: continue with no-go condition + # 2) wheel has been moved in WRONG direction: continue with error condition + # 3) wheel has been moved in CORRECT direction: continue with reward condition + sma.add_state( state_name='closed_loop', state_timer=self.task_params.RESPONSE_WINDOW, @@ -376,6 +407,7 @@ def get_state_machine_trial(self, i): state_change_conditions={'Tup': 'no_go', self.event_error: 'freeze_error', self.event_reward: 'freeze_reward'}, ) + # No-go: hide the visual stimulus and play white noise. Go to exit_state after FEEDBACK_NOGO_DELAY_SECS. sma.add_state( state_name='no_go', state_timer=self.task_params.FEEDBACK_NOGO_DELAY_SECS, @@ -383,13 +415,14 @@ def get_state_machine_trial(self, i): state_change_conditions={'Tup': 'exit_state'}, ) + # Error: Freeze the stimulus and play white noise. + # Continue to hide_stim/exit_state once FEEDBACK_ERROR_DELAY_SECS have passed. 
 sma.add_state(
             state_name='freeze_error',
             state_timer=0,
             output_actions=[self.bpod.actions.bonsai_freeze_stim],
             state_change_conditions={'Tup': 'error'},
         )
-
         sma.add_state(
             state_name='error',
             state_timer=self.task_params.FEEDBACK_ERROR_DELAY_SECS,
@@ -397,20 +430,20 @@
             state_change_conditions={'Tup': 'hide_stim'},
         )
 
+        # Reward: open the valve for a defined duration (and set BNC1 to high), freeze stimulus in center of screen.
+        # Continue to hide_stim/exit_state once FEEDBACK_CORRECT_DELAY_SECS have passed.
         sma.add_state(
             state_name='freeze_reward',
             state_timer=0,
             output_actions=[self.bpod.actions.bonsai_show_center],
             state_change_conditions={'Tup': 'reward'},
         )
-
         sma.add_state(
             state_name='reward',
             state_timer=self.reward_time,
             output_actions=[('Valve1', 255), ('BNC1', 255)],
             state_change_conditions={'Tup': 'correct'},
         )
-
         sma.add_state(
             state_name='correct',
             state_timer=self.task_params.FEEDBACK_CORRECT_DELAY_SECS - self.reward_time,
@@ -418,6 +451,9 @@
             state_change_conditions={'Tup': 'hide_stim'},
         )
 
+        # Hide the visual stimulus. This is achieved by sending a time-stamped byte-message to Bonsai via the Rotary
+        # Encoder Module's ongoing USB-stream. Move to the next state once the Frame2TTL has been triggered, i.e.,
+        # when the stimulus has been removed from the screen. Use the state-timer as a backup to prevent a stall.
         sma.add_state(
             state_name='hide_stim',
             state_timer=0.1,
@@ -425,12 +461,14 @@
             state_change_conditions={'Tup': 'exit_state', 'BNC1High': 'exit_state', 'BNC1Low': 'exit_state'},
         )
 
+        # Wait for ITI_DELAY_SECS before ending the trial. Raise BNC1 to mark this event.
         sma.add_state(
             state_name='exit_state',
             state_timer=self.task_params.ITI_DELAY_SECS,
             output_actions=[('BNC1', 255)],
             state_change_conditions={'Tup': 'exit'},
         )
+
         return sma
 
     @abc.abstractmethod
@@ -441,20 +479,18 @@ def next_trial(self):
     def default_reward_amount(self):
         return self.task_params.REWARD_AMOUNT_UL
 
-    def draw_next_trial_info(self, pleft=0.5, contrast=None, position=None, reward_amount=None):
+    def draw_next_trial_info(self, pleft=0.5, **kwargs):
         """Draw next trial variables.
 
         calls :meth:`send_trial_info_to_bonsai`.
         This is called by the `next_trial` method before updating the Bpod state machine.
""" - if contrast is None: - contrast = misc.draw_contrast(self.task_params.CONTRAST_SET, self.task_params.CONTRAST_SET_PROBABILITY_TYPE) assert len(self.task_params.STIM_POSITIONS) == 2, 'Only two positions are supported' - position = position or int(np.random.choice(self.task_params.STIM_POSITIONS, p=[pleft, 1 - pleft])) + contrast = misc.draw_contrast(self.task_params.CONTRAST_SET, self.task_params.CONTRAST_SET_PROBABILITY_TYPE) + position = int(np.random.choice(self.task_params.STIM_POSITIONS, p=[pleft, 1 - pleft])) quiescent_period = self.task_params.QUIESCENT_PERIOD + misc.truncated_exponential( scale=0.35, min_value=0.2, max_value=0.5 ) - reward_amount = self.default_reward_amount if reward_amount is None else reward_amount stim_gain = ( self.session_info.ADAPTIVE_GAIN_VALUE if self.task_params.get('ADAPTIVE_GAIN', False) else self.task_params.STIM_GAIN ) @@ -468,11 +504,18 @@ def draw_next_trial_info(self, pleft=0.5, contrast=None, position=None, reward_a self.trials_table.at[self.trial_num, 'stim_reverse'] = self.task_params.STIM_REVERSE self.trials_table.at[self.trial_num, 'trial_num'] = self.trial_num self.trials_table.at[self.trial_num, 'position'] = position - self.trials_table.at[self.trial_num, 'reward_amount'] = reward_amount + self.trials_table.at[self.trial_num, 'reward_amount'] = self.default_reward_amount self.trials_table.at[self.trial_num, 'stim_probability_left'] = pleft + + # use the kwargs dict to override computed values + for key, value in kwargs.items(): + if key == 'index': + pass + self.trials_table.at[self.trial_num, key] = value + self.send_trial_info_to_bonsai() - def trial_completed(self, bpod_data): + def trial_completed(self, bpod_data: dict[str, Any]) -> None: # if the reward state has not been triggered, null the reward if np.isnan(bpod_data['States timestamps']['reward'][0][0]): self.trials_table.at[self.trial_num, 'reward_amount'] = 0 @@ -481,11 +524,7 @@ def trial_completed(self, bpod_data): self.session_info.TOTAL_WATER_DELIVERED += self.trials_table.at[self.trial_num, 'reward_amount'] self.session_info.NTRIALS += 1 # SAVE TRIAL DATA - save_dict = self.trials_table.iloc[self.trial_num].to_dict() - save_dict['behavior_data'] = bpod_data - # Dump and save - with open(self.paths['DATA_FILE_PATH'], 'a') as fp: - fp.write(json.dumps(save_dict) + '\n') + self.save_trial_data_to_json(bpod_data) # this is a flag for the online plots. If online plots were in pyqt5, there is a file watcher functionality Path(self.paths['DATA_FILE_PATH']).parent.joinpath('new_trial.flag').touch() self.paths.SESSION_FOLDER.joinpath('transfer_me.flag').touch() @@ -503,19 +542,55 @@ def check_sync_pulses(self, bpod_data): if not misc.get_port_events(events, name='Port1'): log.warning("NO CAMERA SYNC PULSES RECEIVED ON BPOD'S BEHAVIOR PORT 1") - def show_trial_log(self, extra_info='', log_level: int = logging.INFO): + def show_trial_log(self, extra_info: dict[str, Any] | None = None, log_level: int = logging.INFO): + """ + Log the details of the current trial. + + This method retrieves information about the current trial from the + trials table and logs it. It can also incorporate additional information + provided through the `extra_info` parameter. + + Parameters + ---------- + extra_info : dict[str, Any], optional + A dictionary containing additional information to include in the + log. + + log_level : int, optional + The logging level to use when logging the trial information. + Default is logging.INFO. 
+ + Notes + ----- + When overloading, make sure to call the super class and pass additional + log items by means of the extra_info parameter. See the implementation + of :py:meth:`~iblrig.base_choice_world.ActiveChoiceWorldSession.show_trial_log` in + :mod:`~iblrig.base_choice_world.ActiveChoiceWorldSession` for reference. + """ + # construct base info dict trial_info = self.trials_table.iloc[self.trial_num] - + info_dict = { + 'Stim. Position': trial_info.position, + 'Stim. Contrast': trial_info.contrast, + 'Stim. Phase': f'{trial_info.stim_phase:.2f}', + 'Stim. p Left': trial_info.stim_probability_left, + 'Water delivered': f'{self.session_info.TOTAL_WATER_DELIVERED:.1f} µl', + 'Time from Start': self.time_elapsed, + 'Temperature': f'{self.ambient_sensor_table.loc[self.trial_num, "Temperature_C"]:.1f} °C', + 'Air Pressure': f'{self.ambient_sensor_table.loc[self.trial_num, "AirPressure_mb"]:.1f} mb', + 'Rel. Humidity': f'{self.ambient_sensor_table.loc[self.trial_num, "RelativeHumidity"]:.1f} %', + } + + # update info dict with extra_info dict + if isinstance(extra_info, dict): + info_dict.update(extra_info) + + # log info dict log.log(log_level, f'Outcome of Trial #{trial_info.trial_num}:') - log.log(log_level, f'- Stim. Position: {trial_info.position}') - log.log(log_level, f'- Stim. Contrast: {trial_info.contrast}') - log.log(log_level, f'- Stim. Phase: {trial_info.stim_phase}') - log.log(log_level, f'- Stim. p Left: {trial_info.stim_probability_left}') - log.log(log_level, f'- Water delivered: {self.session_info.TOTAL_WATER_DELIVERED:.1f} µl') - log.log(log_level, f'- Time from Start: {self.time_elapsed}') - log.log(log_level, f'- Temperature: {self.ambient_sensor_table.loc[self.trial_num, "Temperature_C"]:.1f} °C') - log.log(log_level, f'- Air Pressure: {self.ambient_sensor_table.loc[self.trial_num, "AirPressure_mb"]:.1f} mb') - log.log(log_level, f'- Rel. Humidity: {self.ambient_sensor_table.loc[self.trial_num, "RelativeHumidity"]:.1f} %\n') + max_key_length = max(len(key) for key in info_dict) + for key, value in info_dict.items(): + spaces = (max_key_length - len(key)) * ' ' + log.log(log_level, f'- {key}: {spaces}{str(value)}') @property def iti_reward(self): @@ -550,12 +625,15 @@ def event_reward(self): return self.device_rotary_encoder.THRESHOLD_EVENTS[(1 if self.task_params.STIM_REVERSE else -1) * self.position] +class HabituationChoiceWorldTrialData(ChoiceWorldTrialData): + """Pydantic Model for Trial Data, extended from :class:`~.iblrig.base_choice_world.ChoiceWorldTrialData`.""" + + delay_to_stim_center: NonNegativeFloat + + class HabituationChoiceWorldSession(ChoiceWorldSession): protocol_name = '_iblrig_tasks_habituationChoiceWorld' - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.trials_table['delay_to_stim_center'] = np.zeros(NTRIALS_INIT) * np.NaN + TrialDataModel = HabituationChoiceWorldTrialData def next_trial(self): self.trial_num += 1 @@ -620,10 +698,19 @@ def get_state_machine_trial(self, i): return sma +class ActiveChoiceWorldTrialData(ChoiceWorldTrialData): + """Pydantic Model for Trial Data, extended from :class:`~.iblrig.base_choice_world.ChoiceWorldTrialData`.""" + + response_side: Annotated[int, Interval(ge=-1, le=1)] + response_time: NonNegativeFloat + trial_correct: bool + + class ActiveChoiceWorldSession(ChoiceWorldSession): """ The ActiveChoiceWorldSession is a base class for protocols where the mouse is actively making decisions by turning the wheel. 
It has the following characteristics + - it is trial based - it is decision based - left and right simulus are equiprobable: there is no biased block @@ -634,6 +721,8 @@ class ActiveChoiceWorldSession(ChoiceWorldSession): The TrainingChoiceWorld, BiasedChoiceWorld are all subclasses of this class """ + TrialDataModel = ActiveChoiceWorldTrialData + def __init__(self, **kwargs): super().__init__(**kwargs) self.trials_table['stim_probability_left'] = np.zeros(NTRIALS_INIT, dtype=np.float64) @@ -648,25 +737,32 @@ def _run(self): ) super()._run() - def show_trial_log(self, extra_info=''): + def show_trial_log(self, extra_info: dict[str, Any] | None = None, log_level: int = logging.INFO): + # construct info dict trial_info = self.trials_table.iloc[self.trial_num] - extra_info = f""" -RESPONSE TIME: {trial_info.response_time} -{extra_info} + info_dict = { + 'Response Time': f'{trial_info.response_time:.2f} s', + 'Trial Correct': trial_info.trial_correct, + 'N Trials Correct': self.session_info.NTRIALS_CORRECT, + 'N Trials Error': self.trial_num - self.session_info.NTRIALS_CORRECT, + } -TRIAL CORRECT: {trial_info.trial_correct} -NTRIALS CORRECT: {self.session_info.NTRIALS_CORRECT} -NTRIALS ERROR: {self.trial_num - self.session_info.NTRIALS_CORRECT} - """ - super().show_trial_log(extra_info=extra_info) + # update info dict with extra_info dict + if isinstance(extra_info, dict): + info_dict.update(extra_info) + + # call parent method + super().show_trial_log(extra_info=info_dict, log_level=log_level) def trial_completed(self, bpod_data): """ The purpose of this method is to - - update the trials table with information about the behaviour coming from the bpod - Constraints on the state machine data: + + - update the trials table with information about the behaviour coming from the bpod + Constraints on the state machine data: - mandatory states: ['correct', 'error', 'no_go', 'reward'] - optional states : ['omit_correct', 'omit_error', 'omit_no_go'] + :param bpod_data: :return: """ @@ -680,8 +776,8 @@ def trial_completed(self, bpod_data): outcome = next(k for k in raw_outcome if raw_outcome[k]) # Update response buffer -1 for left, 0 for nogo, and 1 for rightward position = self.trials_table.at[self.trial_num, 'position'] + self.trials_table.at[self.trial_num, 'trial_correct'] = 'correct' in outcome if 'correct' in outcome: - self.trials_table.at[self.trial_num, 'trial_correct'] = True self.session_info.NTRIALS_CORRECT += 1 self.trials_table.at[self.trial_num, 'response_side'] = -np.sign(position) elif 'error' in outcome: @@ -704,6 +800,13 @@ def trial_completed(self, bpod_data): raise e +class BiasedChoiceWorldTrialData(ActiveChoiceWorldTrialData): + """Pydantic Model for Trial Data, extended from :class:`~.iblrig.base_choice_world.ChoiceWorldTrialData`.""" + + block_num: NonNegativeInt = 0 + block_trial_num: NonNegativeInt = 0 + + class BiasedChoiceWorldSession(ActiveChoiceWorldSession): """ Biased choice world session is the instantiation of ActiveChoiceWorld where the notion of biased @@ -712,14 +815,13 @@ class BiasedChoiceWorldSession(ActiveChoiceWorldSession): base_parameters_file = Path(__file__).parent.joinpath('base_biased_choice_world_params.yaml') protocol_name = '_iblrig_tasks_biasedChoiceWorld' + TrialDataModel = BiasedChoiceWorldTrialData def __init__(self, **kwargs): super().__init__(**kwargs) self.blocks_table = pd.DataFrame( {'probability_left': np.zeros(NBLOCKS_INIT) * np.NaN, 'block_length': np.zeros(NBLOCKS_INIT, dtype=np.int16) * -1} ) - self.trials_table['block_num'] = 
np.zeros(NTRIALS_INIT, dtype=np.int16) - self.trials_table['block_trial_num'] = np.zeros(NTRIALS_INIT, dtype=np.int16) def new_block(self): """ @@ -765,14 +867,28 @@ def next_trial(self): # save and send trial info to bonsai self.draw_next_trial_info(pleft=pleft) - def show_trial_log(self): + def show_trial_log(self, extra_info: dict[str, Any] | None = None, log_level: int = logging.INFO): + # construct info dict trial_info = self.trials_table.iloc[self.trial_num] - extra_info = f""" -BLOCK NUMBER: {trial_info.block_num} -BLOCK LENGTH: {self.blocks_table.loc[self.block_num, 'block_length']} -TRIALS IN BLOCK: {trial_info.block_trial_num} - """ - super().show_trial_log(extra_info=extra_info) + info_dict = { + 'Block Number': trial_info.block_num, + 'Block Length': self.blocks_table.loc[self.block_num, 'block_length'], + 'N Trials in Block': trial_info.block_trial_num, + } + + # update info dict with extra_info dict + if isinstance(extra_info, dict): + info_dict.update(extra_info) + + # call parent method + super().show_trial_log(extra_info=info_dict, log_level=log_level) + + +class TrainingChoiceWorldTrialData(ActiveChoiceWorldTrialData): + """Pydantic Model for Trial Data, extended from :class:`~.iblrig.base_choice_world.ActiveChoiceWorldTrialData`.""" + + training_phase: NonNegativeInt + debias_trial: bool class TrainingChoiceWorldSession(ActiveChoiceWorldSession): @@ -783,6 +899,7 @@ class TrainingChoiceWorldSession(ActiveChoiceWorldSession): """ protocol_name = '_iblrig_tasks_trainingChoiceWorld' + TrialDataModel = TrainingChoiceWorldTrialData def __init__(self, training_phase=-1, adaptive_reward=-1.0, adaptive_gain=None, **kwargs): super().__init__(**kwargs) @@ -806,8 +923,6 @@ def __init__(self, training_phase=-1, adaptive_reward=-1.0, adaptive_gain=None, log.critical(f'Adaptive gain manually set to {adaptive_gain} degrees/mm') self.session_info['ADAPTIVE_GAIN_VALUE'] = adaptive_gain self.var = {'training_phase_trial_counts': np.zeros(6), 'last_10_responses_sides': np.zeros(10)} - self.trials_table['training_phase'] = np.zeros(NTRIALS_INIT, dtype=np.int8) - self.trials_table['debias_trial'] = np.zeros(NTRIALS_INIT, dtype=bool) @property def default_reward_amount(self): @@ -838,7 +953,7 @@ def get_subject_training_info(self): def compute_performance(self): """Aggregate the trials table to compute the performance of the mouse on each contrast.""" - self.trials_table['signed_contrast'] = self.trials_table['contrast'] * np.sign(self.trials_table['position']) + self.trials_table['signed_contrast'] = self.trials_table.contrast * self.trials_table.position performance = self.trials_table.groupby(['signed_contrast']).agg( last_50_perf=pd.NamedAgg(column='trial_correct', aggfunc=lambda x: np.sum(x[np.maximum(-50, -x.size) :]) / 50), ntrials=pd.NamedAgg(column='trial_correct', aggfunc='count'), @@ -889,13 +1004,22 @@ def next_trial(self): position = self.task_params.STIM_POSITIONS[int(np.random.normal(average_right, 0.5) >= 0.5)] # contrast is the last contrast contrast = last_contrast + else: + self.trials_table.at[self.trial_num, 'debias_trial'] = False # save and send trial info to bonsai self.draw_next_trial_info(pleft=self.task_params.PROBABILITY_LEFT, position=position, contrast=contrast) self.trials_table.at[self.trial_num, 'training_phase'] = self.training_phase - def show_trial_log(self): - extra_info = f""" -CONTRAST SET: {np.unique(np.abs(choiceworld.contrasts_set(self.training_phase)))} -SUBJECT TRAINING PHASE (0-5): {self.training_phase} - """ - 
super().show_trial_log(extra_info=extra_info) + def show_trial_log(self, extra_info: dict[str, Any] | None = None, log_level: int = logging.INFO): + # construct info dict + info_dict = { + 'Contrast Set': np.unique(np.abs(choiceworld.contrasts_set(self.training_phase))), + 'Training Phase': self.training_phase, + } + + # update info dict with extra_info dict + if isinstance(extra_info, dict): + info_dict.update(extra_info) + + # call parent method + super().show_trial_log(extra_info=info_dict, log_level=log_level) diff --git a/iblrig/base_tasks.py b/iblrig/base_tasks.py index ab3aa755f..1d64c571f 100644 --- a/iblrig/base_tasks.py +++ b/iblrig/base_tasks.py @@ -5,7 +5,6 @@ This module tries to exclude task related logic. """ -import abc import argparse import contextlib import datetime @@ -17,10 +16,11 @@ import sys import time import traceback -from abc import ABC +from abc import ABC, abstractmethod from collections import OrderedDict from collections.abc import Callable from pathlib import Path +from typing import Protocol, final import numpy as np import pandas as pd @@ -30,18 +30,17 @@ from pythonosc import udp_client import ibllib.io.session_params as ses_params -import iblrig import iblrig.graphic as graph import iblrig.path_helper import pybpodapi from ibllib.oneibl.registration import IBLRegistrationClient -from iblrig import net, sound +from iblrig import net, path_helper, sound from iblrig.constants import BASE_PATH, BONSAI_EXE, PYSPIN_AVAILABLE from iblrig.frame2ttl import Frame2TTL from iblrig.hardware import SOFTCODE, Bpod, MyRotaryEncoder, sound_device_factory from iblrig.hifi import HiFi from iblrig.path_helper import load_pydantic_yaml -from iblrig.pydantic_definitions import HardwareSettings, RigSettings +from iblrig.pydantic_definitions import HardwareSettings, RigSettings, TrialDataModel from iblrig.tools import call_bonsai from iblrig.transfer_experiments import BehaviorCopier, VideoCopier from iblutil.io.net.base import ExpMessage @@ -56,10 +55,14 @@ log = logging.getLogger(__name__) +class HasBpod(Protocol): + bpod: Bpod + + class BaseSession(ABC): version = None """str: !!CURRENTLY UNUSED!! task version string.""" - protocol_name: str | None = None + # protocol_name: str | None = None """str: The name of the task protocol (NB: avoid spaces).""" base_parameters_file: Path | None = None """Path: A YAML file containing base, default task parameters.""" @@ -72,6 +75,12 @@ class BaseSession(ABC): extractor_tasks: list | None = None """list of str: An optional list of pipeline task class names to instantiate when preprocessing task data.""" + TrialDataModel: type[TrialDataModel] + + @property + @abstractmethod + def protocol_name(self) -> str: ... 
+
     def __init__(
         self,
         subject=None,
@@ -107,7 +116,6 @@ def __init__(
         :param append: bool, if True, append to the latest existing session of the same subject for the same day
         """
         self.extractor_tasks = getattr(self, 'extractor_tasks', None)
-        assert self.protocol_name is not None, 'Protocol name must be defined by the child class'
         self._logger = None
         self._setup_loggers(level=log_level)
         if not isinstance(self, EmptySession):
@@ -260,7 +268,7 @@ def _init_paths(self, append: bool = False) -> Bunch:
            * SETTINGS_FILE_PATH: contains the task settings
              `C:\iblrigv8_data\mainenlab\Subjects\SWC_043\2019-01-01\001\raw_task_data_00\_iblrig_taskSettings.raw.json`
        """
-        rig_computer_paths = iblrig.path_helper.get_local_and_remote_paths(
+        rig_computer_paths = path_helper.get_local_and_remote_paths(
             local_path=self.iblrig_settings.iblrig_local_data_path,
             remote_path=self.iblrig_settings.iblrig_remote_data_path,
             lab=self.iblrig_settings.ALYX_LAB,
@@ -313,7 +321,8 @@ def _setup_loggers(self, level='INFO', level_bpod='WARNING', file=None):
         self._logger = setup_logger('iblrig', level=level, file=file)  # logger attr used by create_session to determine log level
         setup_logger('pybpodapi', level=level_bpod, file=file)
 
-    def _remove_file_loggers(self):
+    @staticmethod
+    def _remove_file_loggers():
         for logger_name in ['iblrig', 'pybpodapi']:
             logger = logging.getLogger(logger_name)
             file_handlers = [fh for fh in logger.handlers if isinstance(fh, logging.FileHandler)]
@@ -440,6 +449,40 @@ def save_task_parameters_to_json_file(self, destination_folder: Path | None = No
             json.dump(output_dict, outfile, indent=4, sort_keys=True, default=str)  # converts datetime objects to string
         return json_file  # PosixPath
 
+    @final
+    def save_trial_data_to_json(self, bpod_data: dict):
+        """Validate and save trial data.
+
+        This method retrieves the current trial's data from the trials_table and validates it using a Pydantic model
+        (self.TrialDataModel). It merges in the trial's bpod_data dict and appends everything to the session's
+        JSON data file.
+
+        Parameters
+        ----------
+        bpod_data : dict
+            Trial data returned from pybpod.
+        """
+        # get trial's data as a dict
+        trial_data = self.trials_table.iloc[self.trial_num].to_dict()
+
+        # warn about entries not covered by pydantic model
+        if trial_data.get('trial_num', 1) == 0:
+            for key in set(trial_data.keys()) - set(self.TrialDataModel.model_fields) - {'index'}:
+                log.warning(
+                    f'Key "{key}" in trial_data is missing from TrialDataModel - '
+                    f'its value ({trial_data[key]}) will not be validated.'
+                )
+
+        # validate by passing through pydantic model
+        trial_data = self.TrialDataModel.model_validate(trial_data).model_dump()
+
+        # add bpod_data as 'behavior_data'
+        trial_data['behavior_data'] = bpod_data
+
+        # write json data to file
+        with open(self.paths['DATA_FILE_PATH'], 'a') as fp:
+            fp.write(json.dumps(trial_data) + '\n')
+
     @property
     def one(self):
         """ONE getter."""
@@ -491,7 +534,7 @@ def register_to_alyx(self):
 
         See Also
         --------
-        ibllib.oneibl.IBLRegistrationClient.register_session - The registration method.
+        :external+iblenv:meth:`ibllib.oneibl.registration.IBLRegistrationClient.register_session` - The registration method.
""" if self.session_info['SUBJECT_NAME'] in ('iblrig_test_subject', 'test', 'test_subject'): log.warning('Not registering test subject to Alyx') @@ -598,7 +641,7 @@ def sigint_handler(*args, **kwargs): self._execute_mixins_shared_function('stop_mixin') self._execute_mixins_shared_function('cleanup_mixin') - @abc.abstractmethod + @abstractmethod def start_hardware(self): """ Start the hardware. @@ -606,11 +649,10 @@ def start_hardware(self): This method doesn't explicitly start the mixins as the order has to be defined in the child classes. This needs to be implemented in the child classes, and should start and connect to all hardware pieces. """ - pass + ... - @abc.abstractmethod - def _run(self): - pass + @abstractmethod + def _run(self): ... @staticmethod def extra_parser(): @@ -692,6 +734,8 @@ def exit(self): class BonsaiRecordingMixin(BaseSession): + config: dict + def init_mixin_bonsai_recordings(self, *args, **kwargs): self.bonsai_camera = Bunch({'udp_client': OSCClient(port=7111)}) self.bonsai_microphone = Bunch({'udp_client': OSCClient(port=7112)}) @@ -979,7 +1023,7 @@ def start_mixin_rotary_encoder(self): log.info('Rotary encoder module loaded: OK') -class ValveMixin(BaseSession): +class ValveMixin(BaseSession, HasBpod): def init_mixin_valve(self: object): self.valve = Bunch({}) # the template settings files have a date in 2099, so assume that the rig is not calibrated if that is the case @@ -1040,7 +1084,7 @@ def valve_open(self, reward_valve_time): return self.bpod.session.current_trial.export() -class SoundMixin(BaseSession): +class SoundMixin(BaseSession, HasBpod): """Sound interface methods for state machine.""" def init_mixin_sound(self): diff --git a/iblrig/gui/tools.py b/iblrig/gui/tools.py index 2cfc72514..50180120c 100644 --- a/iblrig/gui/tools.py +++ b/iblrig/gui/tools.py @@ -1,4 +1,5 @@ import argparse +import logging import subprocess import sys import traceback @@ -24,12 +25,17 @@ pyqtSlot, ) from PyQt5.QtGui import QStandardItem, QStandardItemModel -from PyQt5.QtWidgets import QListView, QProgressBar +from PyQt5.QtWidgets import QAction, QLineEdit, QListView, QProgressBar, QPushButton +from requests import HTTPError from iblrig.constants import BASE_PATH +from iblrig.gui import resources_rc # noqa: F401 from iblrig.net import get_remote_devices from iblrig.pydantic_definitions import RigSettings from iblutil.util import dir_size +from one.webclient import AlyxClient + +log = logging.getLogger(__name__) def convert_uis(): @@ -381,3 +387,271 @@ def update(self): item.setStatusTip(f'Remote Device "{device_name}" - {device_address}') item.setData(device_name, Qt.UserRole) self.appendRow(item) + + +class AlyxObject(QObject): + """ + A class to manage user authentication with an AlyxClient. + + This class provides methods to log in and log out users, emitting signals to indicate changes in authentication status. + + Parameters + ---------- + alyxUrl : str, optional + The base URL for the Alyx API. If provided, an AlyxClient will be created. + alyxClient : AlyxClient, optional + An existing AlyxClient instance. If provided, it will be used for authentication. + + Attributes + ---------- + isLoggedIn : bool + Indicates whether a user is currently logged in. + username : str or None + The username of the logged-in user, or None if not logged in. + statusChanged : pyqtSignal + Emitted when the login status changes (logged in or out). The signal carries a boolean indicating the new status. + loggedIn : pyqtSignal + Emitted when a user logs in. 
The signal carries a string representing the username. + loggedOut : pyqtSignal + Emitted when a user logs out. The signal carries a string representing the username. + loginFailed : pyqtSignal + Emitted when a login attempt fails. The signal carries a string representing the username. + """ + + statusChanged = pyqtSignal(bool) + loggedIn = pyqtSignal(str) + loggedOut = pyqtSignal(str) + loginFailed = pyqtSignal(str) + + def __init__(self, *args, alyxUrl: str | None = None, alyxClient: AlyxClient | None = None, **kwargs): + """ + Initializes the AlyxObject. + + Parameters + ---------- + *args : tuple + Positional arguments for QObject. + alyxUrl : str, optional + The base URL for the Alyx API. + alyxClient : AlyxClient, optional + An existing AlyxClient instance. + **kwargs : dict + Keyword arguments for QObject. + """ + super().__init__(*args, **kwargs) + self._icon = super().icon() + + if alyxUrl is not None: + self.client = AlyxClient(base_url=alyxUrl, silent=True) + else: + self.client = alyxClient + + @pyqtSlot(str) + @pyqtSlot(str, str) + @pyqtSlot(str, str, bool) + def logIn(self, username: str, password: str | None = None, cacheToken: bool = False) -> bool: + """ + Logs in a user with the provided username and password. + + Emits the loggedIn and statusChanged signals if the logout is successful, and the loginFailed signal otherwise. + + Parameters + ---------- + username : str + The username of the user attempting to log in. + password : str or None, optional + The password of the user. If None, the login will proceed without a password. + cacheToken : bool, optional + Whether to cache the authentication token. + + Returns + ------- + bool + True if the login was successful, False otherwise. + """ + if self.client is None: + return False + try: + self.client.authenticate(username, password, cache_token=cacheToken, force=password is not None) + except HTTPError as e: + if e.errno == 400 and any(x in e.response.text for x in ('credentials', 'required')): + log.error(e.filename) + self.loginFailed.emit(username) + else: + raise e + if status := self.client.is_logged_in and self.client.user == username: + log.debug(f"Logged into {self.client.base_url} as user '{username}'") + self.statusChanged.emit(True) + self.loggedIn.emit(username) + return status + + @pyqtSlot() + def logOut(self) -> None: + """ + Logs out the currently logged-in user. + + Emits the loggedOut and statusChanged signals if the logout is successful. + """ + if self.client is None or not self.isLoggedIn: + return + username = self.client.user + self.client.logout() + if not (connected := self.client.is_logged_in): + log.debug(f"User '{username}' logged out of {self.client.base_url}") + self.statusChanged.emit(connected) + self.loggedOut.emit(username) + + @property + def isLoggedIn(self): + """Indicates whether a user is currently logged in.""" + return self.client.is_logged_in if isinstance(self.client, AlyxClient) else False + + @property + def username(self) -> str | None: + """The username of the logged-in user, or None if not logged in.""" + return self.client.user if self.isLoggedIn else None + + +class LineEditAlyxUser(QLineEdit): + """ + A custom QLineEdit widget for managing user login with an AlyxObject. + + This widget displays a checkmark icon to indicate the connection status + and allows the user to input their username for logging in. + + Parameters + ---------- + *args : tuple + Positional arguments passed to the QLineEdit constructor. 
+ alyx : AlyxObject + An instance of AlyxObject used to manage login and connection status. + **kwargs : dict + Keyword arguments passed to the QLineEdit constructor. + """ + + def __init__(self, *args, alyx: AlyxObject, **kwargs): + """ + Initializes the LineEditAlyxUser widget. + + Sets up the checkmark icon, connects signals for login status, + and configures the line edit based on the AlyxObject's state. + + Parameters + ---------- + *args : tuple + Positional arguments passed to the QLineEdit constructor. + alyx : AlyxObject + An instance of AlyxObject. + **kwargs : dict + Keyword arguments passed to the QLineEdit constructor. + """ + super().__init__(*args, **kwargs) + self.alyx = alyx + + # Use a QAction to indicate the connection status + self._checkmarkIcon = QAction(parent=self, icon=QtGui.QIcon(':/images/check')) + self.addAction(self._checkmarkIcon, self.ActionPosition.TrailingPosition) + + if self.alyx.client is None: + self.setEnabled(False) + else: + self.setPlaceholderText('not logged in') + self.alyx.statusChanged.connect(self._onStatusChanged) + self.returnPressed.connect(self.logIn) + self._onStatusChanged(self.alyx.isLoggedIn) + + @pyqtSlot(bool) + def _onStatusChanged(self, connected: bool): + """Set some of the widget's properties depending on the current connection-status.""" + self._checkmarkIcon.setVisible(connected) + self._checkmarkIcon.setToolTip(f'Connected to {self.alyx.client.base_url}' if connected else '') + self.setText(self.alyx.username or '') + self.setReadOnly(connected) + + @pyqtSlot() + def logIn(self): + """Attempt to log in using the line edit's current text.""" + self.alyx.logIn(self.text()) + + +class StatefulButton(QPushButton): + """ + A QPushButton that maintains an active/inactive state and emits different signals + based on its state when clicked. + + Parameters + ---------- + active : bool, optional + Initial state of the button (default is False). + + Attributes + ---------- + clickedWhileActive : pyqtSignal + Emitted when the button is clicked while it is in the active state. + clickedWhileInactive : pyqtSignal + Emitted when the button is clicked while it is in the inactive state. + stateChanged : pyqtSignal + Emitted when the button's state has changed. The signal carries the new state. + """ + + clickedWhileActive = pyqtSignal() + clickedWhileInactive = pyqtSignal() + stateChanged = pyqtSignal(bool) + + def __init__(self, *args, active: bool = False, **kwargs): + """ + Initialize the StateButton with the specified active state. + + Parameters + ---------- + *args : tuple + Positional arguments to be passed to the QPushButton constructor. + active : bool, optional + Initial state of the button (default is False). + **kwargs : dict + Keyword arguments to be passed to the QPushButton constructor. + """ + super().__init__(*args, **kwargs) + self._isActive = active + self.clicked.connect(self._onClick) + + @pyqtProperty(bool) + def isActive(self) -> bool: + """ + Get the active state of the button. + + Returns + ------- + bool + True if the button is active, False otherwise. + """ + return self._isActive + + @pyqtSlot(bool) + def setActive(self, active: bool): + """ + Set the active state of the button. + + Emits `stateChanged` if the state has changed. + + Parameters + ---------- + active : bool + The new active state of the button. + """ + if self._isActive != active: + self._isActive = active + self.stateChanged.emit(self._isActive) + + @pyqtSlot() + def _onClick(self): + """ + Handle the button click event. 
+ + Emits `clickedWhileActive` if the button is active, + otherwise emits `clickedWhileInactive`. + """ + if self._isActive: + self.clickedWhileActive.emit() + else: + self.clickedWhileInactive.emit() diff --git a/iblrig/gui/wizard.py b/iblrig/gui/wizard.py index 395086136..1aeab0238 100644 --- a/iblrig/gui/wizard.py +++ b/iblrig/gui/wizard.py @@ -249,12 +249,14 @@ def login( QtWidgets.QMessageBox().critical(None, 'Error', f'{message}\n\n{solution}') # get subjects from Alyx: this is the set of subjects that are alive and not stock in the lab defined in settings - rest_subjects = self.alyx.rest('subjects', 'list', alive=True, stock=False, lab=self.iblrig_settings['ALYX_LAB']) + rest_subjects = self.alyx.rest( + 'subjects', 'list', alive=True, stock=False, lab=self.iblrig_settings['ALYX_LAB'], no_cache=True + ) self.all_subjects.remove(self.test_subject_name) self.all_subjects = [self.test_subject_name] + sorted(set(self.all_subjects + [s['nickname'] for s in rest_subjects])) # then get the projects that map to the current user - rest_projects = self.alyx.rest('projects', 'list') + rest_projects = self.alyx.rest('projects', 'list', no_cache=True) projects = [p['name'] for p in rest_projects if (username in p['users'] or len(p['users']) == 0)] self.all_projects = sorted(set(projects + self.all_projects)) diff --git a/iblrig/net.py b/iblrig/net.py index 745888539..738a5c865 100644 --- a/iblrig/net.py +++ b/iblrig/net.py @@ -10,6 +10,8 @@ tasks: 'udp://123.654.8.8' ``` +Todo +---- TODO case study: starts services but times out due to one service. How to restart without stopping services? Perhaps it can throw a warning if the status is running but continue on anyway? diff --git a/iblrig/pydantic_definitions.py b/iblrig/pydantic_definitions.py index 2b4bd9642..0d68312a4 100644 --- a/iblrig/pydantic_definitions.py +++ b/iblrig/pydantic_definitions.py @@ -3,6 +3,7 @@ from pathlib import Path from typing import Annotated, Literal +import pandas as pd from annotated_types import Ge, Le from pydantic import ( AnyUrl, @@ -17,10 +18,11 @@ field_serializer, field_validator, ) +from pydantic_core._pydantic_core import PydanticUndefined from iblrig.constants import BASE_PATH -FilePath = Annotated[FilePath, PlainSerializer(lambda s: str(s), return_type=str)] +ExistingFilePath = Annotated[FilePath, PlainSerializer(lambda s: str(s), return_type=str)] """Validate that path exists and is file. Cast to str upon save.""" BehaviourInputPort = Annotated[int, Ge(1), Le(4)] @@ -108,7 +110,7 @@ class HardwareSettingsRotaryEncoder(BunchModel): class HardwareSettingsScreen(BunchModel): - DISPLAY_IDX: int = Field(gte=0, lte=1) # -1 = Default, 0 = First, 1 = Second, 2 = Third, etc + DISPLAY_IDX: int = Field(ge=0, le=1) # -1 = Default, 0 = First, 1 = Second, 2 = Third, etc SCREEN_FREQ_TARGET: int = Field(gt=0) SCREEN_FREQ_TEST_DATE: date | None = None SCREEN_FREQ_TEST_STATUS: str | None = None @@ -161,12 +163,12 @@ class HardwareSettingsCamera(BunchModel): class HardwareSettingsCameraWorkflow(BunchModel): - setup: FilePath | None = Field( + setup: ExistingFilePath | None = Field( title='Optional camera setup workflow', default=None, description='An optional path to the camera setup Bonsai workflow.', ) - recording: FilePath = Field( + recording: ExistingFilePath = Field( title='Camera recording workflow', description='The path to the Bonsai workflow for camera recording.' 
    )
@@ -199,3 +201,40 @@ class HardwareSettings(BunchModel):
     device_cameras: dict[str, dict[str, HardwareSettingsCameraWorkflow | HardwareSettingsCamera]] | None
     device_microphone: HardwareSettingsMicrophone | None = None
     VERSION: str
+
+
+class TrialDataModel(BaseModel):
+    """
+    A data model for trial data that extends BaseModel.
+
+    This model allows for the addition of extra fields beyond those defined in the model.
+    """
+
+    model_config = ConfigDict(extra='allow')  # allow adding extra fields
+
+    @classmethod
+    def preallocate_dataframe(cls, n_rows: int) -> pd.DataFrame:
+        """
+        Preallocate a DataFrame with a specified number of rows, using default values or pandas.NA.
+
+        This method creates a pandas DataFrame with the same columns as the fields defined in the Pydantic model.
+        Each column is initialized with the field's default value if available, otherwise with pandas.NA.
+
+        We use pandas.NA for default values rather than NaN, None or zero. This allows us to clearly indicate missing
+        values, which will raise a Pydantic ValidationError.
+
+        Parameters
+        ----------
+        n_rows : int
+            The number of rows to create in the DataFrame.
+
+        Returns
+        -------
+        pd.DataFrame
+            A DataFrame with `n_rows` rows and columns corresponding to the model's fields.
+        """
+        data = {}
+        for field, field_info in cls.model_fields.items():
+            default_value = field_info.default if field_info.default is not PydanticUndefined else pd.NA
+            data[field] = [default_value] * n_rows
+        return pd.DataFrame(data)
diff --git a/iblrig/serial_singleton.py b/iblrig/serial_singleton.py
index d94c061b1..d2cad388f 100644
--- a/iblrig/serial_singleton.py
+++ b/iblrig/serial_singleton.py
@@ -188,7 +188,7 @@ def query(self, query, data_specifier=1):
 
         Parameters
         ----------
-        query : any
+        query : Any
             Query to be sent to the serial device.
         data_specifier : int or str, default: 1
             The number of bytes to receive from the serial device, or a format string
@@ -222,7 +222,7 @@ def to_bytes(data: Any) -> bytes:
 
         Parameters
         ----------
-        data : any
+        data : Any
             Data to be converted to bytestring.
diff --git a/iblrig/test/tasks/test_biased_choice_world_family.py b/iblrig/test/tasks/test_biased_choice_world_family.py
index 1028e041c..ff5b96717 100644
--- a/iblrig/test/tasks/test_biased_choice_world_family.py
+++ b/iblrig/test/tasks/test_biased_choice_world_family.py
@@ -45,7 +45,7 @@ def test_task(self, reward_set: np.ndarray | None = None):
         # makes sure the water reward counts check out
         assert task.trials_table['reward_amount'].sum() == task.session_info.TOTAL_WATER_DELIVERED
         assert np.sum(task.trials_table['reward_amount'] == 0) == task.trial_num + 1 - task.session_info.NTRIALS_CORRECT
-        assert np.all(~np.isnan(task.trials_table['reward_valve_time']))
+        assert not task.trials_table['reward_valve_time'].isna().any()
         # Test the blocks task logic
         df_blocks = task.trials_table.groupby('block_num').agg(
             count=pd.NamedAgg(column='stim_angle', aggfunc='count'),
@@ -105,6 +105,17 @@ def setUp(self) -> None:
         self.get_task_kwargs()
         self.task = EphysChoiceWorldSession(**self.task_kwargs)
 
+    def test_task(self, _=None):
+        super().test_task()
+
+        # check that the task in fact uses the pre-generated data
+        cols = list(
+            set(self.task.get_session_template(0).columns)
+            - {'index', 'reward_amount', 'reward_valve_time', 'response_side', 'response_time', 'trial_correct'}
+        )
+        template = self.task.get_session_template(0).head(len(self.task.trials_table))
+        assert (self.task.trials_table == template)[cols].all().all()
+
 
 class TestNeuroModulatorBiasedChoiceWorld(TestInstantiationBiased):
     def setUp(self) -> None:
diff --git a/iblrig/test/tasks/test_training_choice_world.py b/iblrig/test/tasks/test_training_choice_world.py
index 88dc9bf99..793970c9b 100644
--- a/iblrig/test/tasks/test_training_choice_world.py
+++ b/iblrig/test/tasks/test_training_choice_world.py
@@ -77,9 +77,9 @@ def test_task(self):
             normalized_counts = normalized_counts / (nt / contrast_set.size)
             np.testing.assert_array_less(normalized_counts, 0.33)
             if debias:
-                assert np.sum(trials_table['debias_trial']) > 20
+                assert trials_table.debias_trial.astype(int).sum() > 20
             else:
-                assert np.sum(trials_table['debias_trial']) == 0
+                assert trials_table.debias_trial.astype(int).sum() == 0
 
 
 class TestInstantiationTraining(BaseTestCases.CommonTestInstantiateTask):
diff --git a/iblrig_tasks/_iblrig_tasks_ephysChoiceWorld/task.py b/iblrig_tasks/_iblrig_tasks_ephysChoiceWorld/task.py
index 8cf2ca6c2..83a027786 100644
--- a/iblrig_tasks/_iblrig_tasks_ephysChoiceWorld/task.py
+++ b/iblrig_tasks/_iblrig_tasks_ephysChoiceWorld/task.py
@@ -24,6 +24,12 @@ def __init__(self, *args, session_template_id=0, **kwargs):
             block_length=pd.NamedAgg(column='stim_probability_left', aggfunc='count'),
         )
 
+    def next_trial(self):
+        self.trial_num += 1
+        trial_params = self.trials_table.iloc[self.trial_num].drop(['index', 'trial_num']).to_dict()
+        self.block_num = trial_params['block_num']
+        self.draw_next_trial_info(**trial_params)
+
     @staticmethod
     def get_session_template(session_template_id: int) -> pd.DataFrame:
         """
diff --git a/iblrig_tasks/_iblrig_tasks_ephysChoiceWorld/trials_fixtures.pqt b/iblrig_tasks/_iblrig_tasks_ephysChoiceWorld/trials_fixtures.pqt
index 165d1ee50..35cea61a2 100644
Binary files a/iblrig_tasks/_iblrig_tasks_ephysChoiceWorld/trials_fixtures.pqt and b/iblrig_tasks/_iblrig_tasks_ephysChoiceWorld/trials_fixtures.pqt differ
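Editor's note - the regenerated fixtures can be sanity-checked along the same lines as the new test above; a small sketch, assuming the ephys task class is named Session as in the other task files:

    from iblrig_tasks._iblrig_tasks_ephysChoiceWorld.task import Session

    # load the first pre-generated session and inspect its block structure
    template = Session.get_session_template(0)
    print(template.groupby('block_num')['stim_probability_left'].first())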
diff --git a/iblrig_tasks/_iblrig_tasks_neuroModulatorChoiceWorld/task.py b/iblrig_tasks/_iblrig_tasks_neuroModulatorChoiceWorld/task.py
index 71fea2bc2..5e240b58d 100644
--- a/iblrig_tasks/_iblrig_tasks_neuroModulatorChoiceWorld/task.py
+++ b/iblrig_tasks/_iblrig_tasks_neuroModulatorChoiceWorld/task.py
@@ -1,9 +1,10 @@
 import logging
 
 import numpy as np
+from pydantic import NonNegativeFloat
 
 import iblrig.misc
-from iblrig.base_choice_world import BiasedChoiceWorldSession
+from iblrig.base_choice_world import BiasedChoiceWorldSession, BiasedChoiceWorldTrialData
 from iblrig.hardware import SOFTCODE
 from pybpodapi.protocol import StateMachine
 
@@ -11,13 +12,17 @@
 log = logging.getLogger(__name__)
 
 
+class NeuroModulatorChoiceTrialData(BiasedChoiceWorldTrialData):
+    omit_feedback: bool
+    choice_delay: NonNegativeFloat
+
+
 class Session(BiasedChoiceWorldSession):
     protocol_name = '_iblrig_tasks_neuromodulatorChoiceWorld'
+    TrialDataModel = NeuroModulatorChoiceTrialData
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.trials_table['omit_feedback'] = np.zeros(self.trials_table.shape[0], dtype=bool)
-        self.trials_table['choice_delay'] = np.zeros(self.trials_table.shape[0], dtype=np.float32)
 
     def next_trial(self):
         super().next_trial()
diff --git a/pdm.lock b/pdm.lock
index a5b08782c..99581b35d 100644
--- a/pdm.lock
+++ b/pdm.lock
@@ -5,7 +5,7 @@
 groups = ["default", "ci", "dev", "doc", "project-extraction", "test", "typing"]
 strategy = ["inherit_metadata"]
 lock_version = "4.5.0"
-content_hash = "sha256:02c08779c288dce1bc517fc41e2c3d385fd24fc15a0e2fa491f87e5f94863e0b"
+content_hash = "sha256:714a13365ed6f4d37114c9146f1b274c61e9b99dee7955f3add4550cabdf3845"
 
 [[metadata.targets]]
 requires_python = "==3.10.*"
@@ -567,6 +567,7 @@ version = "1.2.2"
 requires_python = ">=3.7"
 summary = "Backport of PEP 654 (exception groups)"
 groups = ["default", "ci", "dev", "doc", "test"]
+marker = "python_version < \"3.11\""
 files = [
     {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
     {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
@@ -700,7 +701,7 @@ version = "3.0.3"
 requires_python = ">=3.7"
 summary = "Lightweight in-process concurrent programming"
 groups = ["dev", "doc"]
-marker = "platform_machine == \"win32\" or platform_machine == \"WIN32\" or platform_machine == \"AMD64\" or platform_machine == \"amd64\" or platform_machine == \"x86_64\" or platform_machine == \"ppc64le\" or platform_machine == \"aarch64\""
+marker = "(platform_machine == \"win32\" or platform_machine == \"WIN32\" or platform_machine == \"AMD64\" or platform_machine == \"amd64\" or platform_machine == \"x86_64\" or platform_machine == \"ppc64le\" or platform_machine == \"aarch64\") and python_version < \"3.13\""
 files = [
     {file = "greenlet-3.0.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a"},
     {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881"},
@@ -1872,24 +1873,24 @@ files = [
 
 [[package]]
 name = "pydantic"
-version = "2.8.2"
+version = "2.9.1"
 requires_python = ">=3.8"
 summary = "Data validation using Python type hints"
 groups = ["default"]
 dependencies = [
-    "annotated-types>=0.4.0",
-    "pydantic-core==2.20.1",
+    "annotated-types>=0.6.0",
+    "pydantic-core==2.23.3",
     "typing-extensions>=4.12.2; python_version >= \"3.13\"",
     "typing-extensions>=4.6.1; python_version < \"3.13\"",
 ]
 files = [
-    {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"},
-    {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"},
+    {file = "pydantic-2.9.1-py3-none-any.whl", hash = "sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612"},
+    {file = "pydantic-2.9.1.tar.gz", hash = "sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2"},
 ]
 
 [[package]]
 name = "pydantic-core"
-version = "2.20.1"
+version = "2.23.3"
 requires_python = ">=3.8"
 summary = "Core functionality for Pydantic validation and serialization"
 groups = ["default"]
@@ -1897,27 +1898,27 @@ dependencies = [
     "typing-extensions!=4.7.0,>=4.6.0",
 ]
 files = [
-    {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"},
-    {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"},
-    {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"},
-    {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"},
-    {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"},
-    {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"},
-    {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"},
-    {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"},
-    {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"},
-    {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"},
-    {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"},
-    {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"},
-    {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"},
-    {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"},
-    {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"},
-    {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"},
-    {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"},
-    {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"},
-    {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"},
-    {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"},
-    {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"},
+    {file = "pydantic_core-2.23.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6"},
+    {file = "pydantic_core-2.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5"},
+    {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b"},
+    {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700"},
+    {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01"},
+    {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed"},
+    {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec"},
+    {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba"},
+    {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee"},
+    {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe"},
+    {file = "pydantic_core-2.23.3-cp310-none-win32.whl", hash = "sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b"},
+    {file = "pydantic_core-2.23.3-cp310-none-win_amd64.whl", hash = "sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83"},
+    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d"},
+    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4"},
+    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211"},
+    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961"},
+    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e"},
+    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"},
+    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4"},
+    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b"},
+    {file = "pydantic_core-2.23.3.tar.gz", hash = "sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690"},
 ]
 
 [[package]]
@@ -3039,6 +3040,17 @@ files = [
     {file = "tycmd_wrapper-0.2.1-py3-none-win_amd64.whl", hash = "sha256:73c17d7c6d073d7dd2b050731988a5fabb3be69ec339c3ef5fcfaae42eefe090"},
 ]
 
+[[package]]
+name = "types-pyserial"
+version = "3.5.0.20240826"
+requires_python = ">=3.8"
+summary = "Typing stubs for pyserial"
+groups = ["dev", "typing"]
+files = [
+    {file = "types-pyserial-3.5.0.20240826.tar.gz", hash = "sha256:c88c603734410ad714fba85eb10f145dc592ccf1542bb958f12a8481722f37db"},
+    {file = "types_pyserial-3.5.0.20240826-py3-none-any.whl", hash = "sha256:f3fddafe593060afeec489ed6f6a18dcf05ae5eae1c8e9026b50c7960f00b076"},
+]
+
 [[package]]
 name = "types-python-dateutil"
 version = "2.9.0.20240316"
diff --git a/pyproject.toml b/pyproject.toml
index 4ec7a74f7..daaaf9811 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -35,7 +35,7 @@ dependencies = [
     "numpy>=1.26.4",
     "packaging>=24.1",
     "pandas>=2.2.2",
-    "pydantic>=2.8.2",
+    "pydantic>=2.9.1",
    "pyqtgraph>=0.13.7",
     "python-osc>=1.8.3",
     "pyusb>=1.2.1",
@@ -96,6 +96,7 @@ typing = [
     "types-PyYAML>=6.0.12.20240808",
     "types-requests>=2.32.0.20240712",
     "types-python-dateutil>=2.9.0.20240316",
+    "types-pyserial>=3.5.0.20240826",
 ]
 ci = [
     "pytest-github-actions-annotate-failures>=0.2.0",
diff --git a/scripts/validate_hifi.py b/scripts/validate_hifi.py
new file mode 100644
index 000000000..9ec77a201
--- /dev/null
+++ b/scripts/validate_hifi.py
@@ -0,0 +1,50 @@
+# Validate sound output of the Bpod HiFi Module across a range of configurations
+#
+# When running this script you should hear a series of identical beeps (500 ms, 480 Hz).
+# Any distortion, crackling, pops, etc. could indicate an issue with the HiFi module.
+#
+# NOTE: Adapt SERIAL_PORT according to the connected hardware.
+# WARNING: Be careful when using headphones for testing - the sound output could be very loud!
+
+import logging
+from time import sleep
+
+import numpy as np
+
+from iblrig.hifi import HiFi
+from iblutil.util import setup_logger
+
+setup_logger(name='iblrig', level='DEBUG')
+log = logging.getLogger(__name__)
+
+SERIAL_PORT = '/dev/ttyACM0'
+DURATION_SEC = 0.5
+PAUSE_SEC = 0.5
+FREQUENCY_HZ = 480
+FADE_SEC = 0.02
+
+hifi = HiFi(SERIAL_PORT, attenuation_db=0)
+
+for channels in ['mono', 'stereo']:
+    for sampling_rate_hz in [44100, 48e3, 96e3, 192e3]:
+        # create signal
+        t = np.linspace(0, DURATION_SEC, int(sampling_rate_hz * DURATION_SEC), False)
+        sound = np.sin(2 * np.pi * FREQUENCY_HZ * t) * 0.1
+
+        # avoid pops by fading the signal in and out
+        fade = np.linspace(0, 1, round(FADE_SEC * sampling_rate_hz))
+        sound[: len(fade)] *= fade
+        sound[-len(fade) :] *= np.flip(fade)
+
+        # create stereo signal by duplication
+        if channels == 'stereo':
+            sound = sound.reshape(-1, 1).repeat(2, axis=1)
+
+        # load & play sound
+        hifi.sampling_rate_hz = sampling_rate_hz
+        hifi.load(0, sound)
+        hifi.push()
+        hifi.play(0)
+
+        # wait for next iteration
+        sleep(DURATION_SEC + PAUSE_SEC)
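Editor's note - SERIAL_PORT has to be adapted to the local setup before running the script above; a small helper sketch for listing candidate ports with pyserial (not part of this changeset):

    # list serial devices to identify the HiFi module's port, then run:
    #   python scripts/validate_hifi.py
    from serial.tools import list_ports

    for port in list_ports.comports():
        print(port.device, '-', port.description)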