diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml
index fd11829ef..ca305c6ef 100644
--- a/.github/workflows/main.yaml
+++ b/.github/workflows/main.yaml
@@ -46,13 +46,6 @@ jobs:
         if: matrix.os == 'ubuntu-latest'
         run: sudo apt-get install -y libportaudio2

-      - name: Install Bonsai (Windows only)
-        if: matrix.os == 'windows-latest'
-        shell: pwsh -l {0}
-        run: |
-          cd Bonsai
-          powershell.exe .\install.ps1
-
       - name: Create config files (Ubuntu)
         if: matrix.os == 'ubuntu-latest'
         run: |
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 11444df8d..860f65c38 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,10 @@ Changelog
 ---------
 -------------------------------
+8.10.0
+------
+* adaptive reward from previous sessions in TrainingChoiceWorld
+* updater: fetch remote changelog to advertise new features

 8.9.4
 -----
diff --git a/iblrig/__init__.py b/iblrig/__init__.py
index 541cc2dc4..2396cf832 100644
--- a/iblrig/__init__.py
+++ b/iblrig/__init__.py
@@ -1,7 +1,10 @@
 # PLEASE REMEMBER TO:
-# 1) update CHANGELOG.md
-# 2) git tag the release in accordance to the version number below (after merge!)
-__version__ = '8.9.4'
+# 1) update CHANGELOG.md including changes from the last tag
+# 2) Pull request to iblrigv8dev
+# 3) Check CI and eventually wet lab test
+# 4) Pull request to iblrigv8
+# 5) git tag the release in accordance to the version number below (after merge!)
+__version__ = '8.10.0'

 # The following method call will try to get post-release information (i.e. the number of commits since the last tagged
 # release corresponding to the one above), plus information about the state of the local repository (dirty/broken)
diff --git a/iblrig/base_choice_world.py b/iblrig/base_choice_world.py
index 7a4970c4a..0a71c4181 100644
--- a/iblrig/base_choice_world.py
+++ b/iblrig/base_choice_world.py
@@ -375,6 +375,10 @@ def get_state_machine_trial(self, i):
     def next_trial(self):
         pass

+    @property
+    def reward_amount(self):
+        return self.task_params.REWARD_AMOUNT_UL
+
     def draw_next_trial_info(self, pleft=0.5, contrast=None, position=None):
         contrast = contrast or misc.draw_contrast(self.task_params.CONTRAST_SET, self.task_params.CONTRAST_SET_PROBABILITY_TYPE)
         assert len(self.task_params.STIM_POSITIONS) == 2, "Only two positions are supported"
@@ -389,7 +393,7 @@ def draw_next_trial_info(self, pleft=0.5, contrast=None, position=None):
         self.trials_table.at[self.trial_num, 'stim_freq'] = self.task_params.STIM_FREQ
         self.trials_table.at[self.trial_num, 'trial_num'] = self.trial_num
         self.trials_table.at[self.trial_num, 'position'] = position
-        self.trials_table.at[self.trial_num, 'reward_amount'] = self.task_params.REWARD_AMOUNT_UL
+        self.trials_table.at[self.trial_num, 'reward_amount'] = self.reward_amount
         self.trials_table.at[self.trial_num, 'stim_probability_left'] = pleft
         self.send_trial_info_to_bonsai()
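
The hunk above routes the per-trial reward through a new `reward_amount` property, so a subclass only has to override the property to change what gets written into the trials table. A minimal sketch of the pattern, with stand-in class names rather than the real session classes:

```python
class Base:
    REWARD_AMOUNT_UL = 3.0  # stands in for task_params.REWARD_AMOUNT_UL

    @property
    def reward_amount(self):
        return self.REWARD_AMOUNT_UL

    def draw_next_trial_info(self):
        return self.reward_amount  # the base class always goes through the property


class Adaptive(Base):
    def __init__(self, adaptive_ul=None):
        self.session_info = {} if adaptive_ul is None else {'ADAPTIVE_REWARD_AMOUNT_UL': adaptive_ul}

    @property
    def reward_amount(self):
        # fall back to the task parameter when no adaptive amount was computed
        return self.session_info.get('ADAPTIVE_REWARD_AMOUNT_UL', self.REWARD_AMOUNT_UL)


assert Base().draw_next_trial_info() == 3.0
assert Adaptive(1.9).draw_next_trial_info() == 1.9
assert Adaptive().draw_next_trial_info() == 3.0
```
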
@@ -559,7 +563,18 @@ def get_state_machine_trial(self, i):

 class ActiveChoiceWorldSession(ChoiceWorldSession):
-
+    """
+    The ActiveChoiceWorldSession is a base class for protocols where the mouse actively makes decisions
+    by turning the wheel. It has the following characteristics:
+    -   it is trial based
+    -   it is decision based
+    -   left and right stimuli are equiprobable: there is no biased block
+    -   a trial can either be correct / error / no_go depending on the side of the stimulus and the response
+    -   it has a quantifiable performance, computed as the proportion of correct trials, unlike passive
+        stimulation or habituation protocols
+
+    TrainingChoiceWorld and BiasedChoiceWorld are both subclasses of this class
+    """
     def __init__(self, **kwargs):
         super(ActiveChoiceWorldSession, self).__init__(**kwargs)
         self.trials_table['stim_probability_left'] = np.zeros(NTRIALS_INIT, dtype=np.float32)
@@ -620,6 +635,10 @@ def trial_completed(self, bpod_data):

 class BiasedChoiceWorldSession(ActiveChoiceWorldSession):
+    """
+    Biased choice world session is the instantiation of ActiveChoiceWorld where the notion of biased
+    blocks is introduced.
+    """
     protocol_name = "_iblrig_tasks_biasedChoiceWorld"

     def __init__(self, **kwargs):
@@ -687,29 +706,28 @@ def show_trial_log(self):

 class TrainingChoiceWorldSession(ActiveChoiceWorldSession):
+    """
+    The TrainingChoiceWorldSession corresponds to the first training protocol of the choice world task.
+    This protocol has a complicated adaptation of the number of contrasts (embodied by the training_phase
+    property) and of the reward amount (embodied by the adaptive_reward property).
+    """
     protocol_name = "_iblrig_tasks_trainingChoiceWorld"

-    def __init__(self, training_phase=-1, **kwargs):
+    def __init__(self, training_phase=-1, adaptive_reward=-1.0, **kwargs):
         super(TrainingChoiceWorldSession, self).__init__(**kwargs)
-        from iblrig.choiceworld import get_training_phase
+        inferred_training_phase, inferred_adaptive_reward = self.get_subject_training_info()
         if training_phase == -1:
-            try:
-                training_phase = get_training_phase(self.session_info.SUBJECT_NAME)
-                self.logger.warning(f"Got training phase: {training_phase}")
-            except Exception as ex:
-                self.logger.debug('Failed to get training phase: %s', ex)
-                if self.interactive:
-                    training_phase = iblrig.graphic.numinput(
-                        "Subject training phase", "Subject training phase: (0-5)",
-                        askint=True, nullable=False, default=0, minval=0, maxval=5)
-                else:
-                    self.logger.warning(f"Could not get training phase from Alyx: {traceback.format_exc()}, please set it"
-                                        f"manually in ./iblrig_tasks/_iblrig_tasks_trainingChoiceWorld/task.py"
-                                        f"training phase is set 0 for this session")
-                    training_phase = 0
+            self.logger.critical(f"Got training phase: {inferred_training_phase}")
+            self.training_phase = inferred_training_phase
         else:
-            self.logger.warning(f"Training phase manually set to: {training_phase}")
-        self.training_phase = training_phase
+            self.logger.critical(f"Training phase manually set to: {training_phase}")
+            self.training_phase = training_phase
+        if adaptive_reward == -1:
+            self.logger.critical(f"Got adaptive reward {inferred_adaptive_reward} uL")
+            self.session_info["ADAPTIVE_REWARD_AMOUNT_UL"] = inferred_adaptive_reward
+        else:
+            self.logger.critical(f"Adaptive reward manually set to {adaptive_reward} uL")
+            self.session_info["ADAPTIVE_REWARD_AMOUNT_UL"] = adaptive_reward
         self.var = {
             "training_phase_trial_counts": np.zeros(6),
             "last_10_responses_sides": np.zeros(10),
@@ -717,6 +735,34 @@ def __init__(self, training_phase=-1, **kwargs):
         self.trials_table['training_phase'] = np.zeros(NTRIALS_INIT, dtype=np.int8)
         self.trials_table['debias_trial'] = np.zeros(NTRIALS_INIT, dtype=bool)

+    @property
+    def reward_amount(self):
+        return self.session_info.get("ADAPTIVE_REWARD_AMOUNT_UL", self.task_params.REWARD_AMOUNT_UL)
+
+    def get_subject_training_info(self):
+        """
+        Gets the previous session's information according to this session's parameters and deduces
+        the training level and the adaptive reward amount.
+        :return: training_phase (int), adaptive_reward (float)
+        """
+        try:
+            training_phase, adaptive_reward, _ = choiceworld.get_subject_training_info(
+                subject_name=self.session_info.SUBJECT_NAME,
+                subject_weight_grams=self.session_info['SUBJECT_WEIGHT'],
+                default_reward=self.task_params.REWARD_AMOUNT_UL,
+                local_path=self.iblrig_settings['iblrig_local_data_path'],
+                remote_path=self.iblrig_settings['iblrig_remote_data_path'],
+                lab=self.iblrig_settings['ALYX_LAB'],
+                task_name=self.protocol_name,
+            )
+        except Exception:
+            self.logger.critical('Failed to get training information from previous sessions: %s', traceback.format_exc())
+            training_phase, adaptive_reward = (
+                iblrig.choiceworld.DEFAULT_TRAINING_PHASE, iblrig.choiceworld.DEFAULT_REWARD_VOLUME)
+        self.logger.critical(f'The mouse will train on level {training_phase} and with reward {adaptive_reward} uL')
+        return training_phase, adaptive_reward
+
     def compute_performance(self):
         """
         Aggregates the trials table to compute the performance of the mouse on each contrast
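
Both new constructor arguments use -1 as an "automatic" sentinel: the values inferred from previous sessions only apply when the caller does not override them. A stand-alone sketch of that logic (a hypothetical helper, not part of iblrig):

```python
def resolve(training_phase=-1, adaptive_reward=-1.0, inferred=(0, 3.0)):
    # -1 means "automatic": fall back to what was inferred from previous sessions
    phase = inferred[0] if training_phase == -1 else training_phase
    reward = inferred[1] if adaptive_reward == -1 else adaptive_reward
    return phase, reward

assert resolve(inferred=(2, 2.1)) == (2, 2.1)                       # everything inferred
assert resolve(training_phase=4, adaptive_reward=2.9) == (4, 2.9)   # manual overrides win
```
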
diff --git a/iblrig/base_tasks.py b/iblrig/base_tasks.py
index e801b8d44..0a7d3c358 100644
--- a/iblrig/base_tasks.py
+++ b/iblrig/base_tasks.py
@@ -86,13 +86,10 @@ def __init__(self, subject=None, task_parameter_file=None, file_hardware_settings
         self.iblrig_settings = iblrig.path_helper.load_settings_yaml(file_iblrig_settings or 'iblrig_settings.yaml')
         if iblrig_settings is not None:
             self.iblrig_settings.update(iblrig_settings)
-        if self.iblrig_settings['iblrig_local_data_path'] is None:
-            self.iblrig_settings['iblrig_local_data_path'] = Path.home().joinpath('iblrig_data')
-        else:
-            self.iblrig_settings['iblrig_local_data_path'] = Path(self.iblrig_settings['iblrig_local_data_path'])
         # Load the tasks settings, from the task folder or override with the input argument
         task_parameter_file = task_parameter_file or Path(inspect.getfile(self.__class__)).parent.joinpath('task_parameters.yaml')
         self.task_params = Bunch({})
+        self.wizard = wizard

         # first loads the base parameters for a given task
         if self.base_parameters_file is not None and self.base_parameters_file.exists():
@@ -150,17 +147,18 @@ def _init_paths(self, append: bool = False):
         DATA_FILE_PATH: contains the bpod trials
             >>> C:\iblrigv8_data\mainenlab\Subjects\SWC_043\2019-01-01\001\raw_task_data_00\_iblrig_taskData.raw.jsonable  # noqa
         """
+        rig_computer_paths = iblrig.path_helper.get_local_and_remote_paths(
+            local_path=self.iblrig_settings['iblrig_local_data_path'],
+            remote_path=self.iblrig_settings['iblrig_remote_data_path'],
+            lab=self.iblrig_settings['ALYX_LAB']
+        )
         paths = Bunch({'IBLRIG_FOLDER': Path(iblrig.__file__).parents[1]})
         paths.BONSAI = paths.IBLRIG_FOLDER.joinpath('Bonsai', 'Bonsai.exe')
         paths.VISUAL_STIM_FOLDER = paths.IBLRIG_FOLDER.joinpath('visual_stim')
-        paths.LOCAL_SUBJECT_FOLDER = self.iblrig_settings['iblrig_local_data_path'].joinpath(
-            self.iblrig_settings['ALYX_LAB'] or '', 'Subjects')
-        paths.REMOTE_SUBJECT_FOLDER = (Path(self.iblrig_settings['iblrig_remote_data_path']).joinpath('Subjects')
-                                       if self.iblrig_settings['iblrig_remote_data_path'] else None)
+        paths.LOCAL_SUBJECT_FOLDER = rig_computer_paths['local_subjects_folder']
+        paths.REMOTE_SUBJECT_FOLDER = rig_computer_paths['remote_subjects_folder']
         # initialize the session path
-        date_folder = self.iblrig_settings['iblrig_local_data_path'].joinpath(
-            self.iblrig_settings['ALYX_LAB'] or '',
-            'Subjects',
+        date_folder = paths.LOCAL_SUBJECT_FOLDER.joinpath(
             self.session_info.SUBJECT_NAME,
             self.session_info.SESSION_START_TIME[:10],
         )
@@ -386,7 +384,7 @@ def sigint_handler(*args, **kwargs):
             self.logger.critical("Graceful exit")
             self.logger.info(f'Session {self.paths.SESSION_RAW_DATA_FOLDER}')
             self.session_info.SESSION_END_TIME = datetime.datetime.now().isoformat()
-            if self.interactive:
+            if self.interactive and not self.wizard:
                 self.session_info.POOP_COUNT = graph.numinput(
                     "Poop count", f"{self.session_info.SUBJECT_NAME} droppings count:", nullable=True, askint=True)
             self.save_task_parameters_to_json_file()
@@ -747,25 +745,6 @@ def start_mixin_rotary_encoder(self):

 class ValveMixin:
-    def get_session_reward_amount(self: object) -> float:
-        # simply returns the reward amount if no adaptive rewared is used
-        if not self.task_params.ADAPTIVE_REWARD:
-            return self.task_params.REWARD_AMOUNT
-        # simply returns the reward amount if no adaptive rewared is used
-        if not self.task_params.ADAPTIVE_REWARD:
-            return self.task_params.REWARD_AMOUNT
-        else:
-            raise NotImplementedError
-        # todo: training choice world reward from session to session
-        # first session : AR_INIT_VALUE, return
-        # if total_water_session < (subject_weight / 25):
-        #    minimum(last_reward + AR_STEP, AR_MAX_VALUE)  3 microliters AR_MAX_VALUE
-        # last ntrials strictly below 200:
-        #    keep the same reward
-        # trial between 200 and above:
-        #    maximum(last_reward - AR_STEP, AR_MIN_VALUE)  1.5 microliters AR_MIN_VALUE
-        # when implementing this make sure the test is solid

     def init_mixin_valve(self: object):
         self.valve = Bunch({})
diff --git a/iblrig/choiceworld.py b/iblrig/choiceworld.py
index 64e92cdfc..f1bbbe0db 100644
--- a/iblrig/choiceworld.py
+++ b/iblrig/choiceworld.py
@@ -1,58 +1,80 @@
 """
-Choice World Task related logic
+Choice World Task related logic and functions that translate the task description in
+Appendix 2 of the paper into code.
 """
-from pathlib import Path
 import numpy as np

-from ibllib.io import session_params
-from iblrig.raw_data_loaders import load_task_jsonable
-from iblrig.path_helper import load_settings_yaml
+import iblrig.raw_data_loaders
+from iblrig.path_helper import iterate_previous_sessions
+from iblutil.util import setup_logger
+
+logger = setup_logger('iblrig', level='INFO')

 CONTRASTS = 1 / np.array([-1, -2, -4, -8, -16, np.inf, 16, 8, 4, 2, 1])
+DEFAULT_TRAINING_PHASE = 0
+DEFAULT_REWARD_VOLUME = 3

-def _get_latest_training_phase_from_folder(folder_subjects):
-    n_retries = 3
-    c = 0
-    if folder_subjects is None:
-        return
-    for file_experiment in sorted(folder_subjects.rglob('_ibl_experiment.description*.yaml'), reverse=True):
-        session_path = file_experiment.parent
-        ad = session_params.read_params(file_experiment)
-        if '_iblrig_tasks_trainingChoiceWorld' not in ad['tasks'][0]:
-            continue
-        for ad_task in ad['tasks']:
-            adt = ad_task.get('_iblrig_tasks_trainingChoiceWorld', None)
-            if not adt:
-                return
-            trials_data, bpod_data = load_task_jsonable(session_path.joinpath(adt['collection'], '_iblrig_taskData.raw.jsonable'))
-            if 'training_phase' in trials_data:
-                training_phase = trials_data['training_phase'].values[-1]
-                return (training_phase, session_path.parts[-2])
-        c += 1
-        if c >= n_retries:
-            break
+def compute_adaptive_reward_volume(subject_weight_g, reward_volume_ul, delivered_volume_ul, ntrials):
+    """
+    If the mouse completed over 200 trials in the previous session, the reward volume is automatically
+    lowered by 0.1 microliters for the next session, but cannot go lower than a floor of 1.5 microliters.
+    If the mouse received less than its minimum required daily dose (~1 milliliter/25 grams of body weight)
+    during the previous session, the reward volume is increased by 0.1 microliters for the next session,
+    but cannot go above a ceiling of 3 microliters.
+    :param subject_weight_g: subject weight in grams
+    :param reward_volume_ul: the last reward volume setting in uL
+    :param delivered_volume_ul: the cumulative water delivered during the last session in uL
+    :param ntrials: the number of trials in the last session
+    :return: adaptive_reward_ul
+    """
+    if subject_weight_g > (delivered_volume_ul / 1000 * 25):
+        reward_volume_ul += 0.1
+    elif ntrials > 200:
+        reward_volume_ul -= 0.1
+    return np.maximum(np.minimum(reward_volume_ul, 3), 1.5)
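
A quick worked example of this rule, using the same argument tuples as the `TestAdaptiveReward` fixture further down in this diff (assumes iblrig is installed):

```python
from iblrig.choiceworld import compute_adaptive_reward_volume

# (subject_weight_g, reward_volume_ul, delivered_volume_ul, ntrials) -> next session's volume
assert compute_adaptive_reward_volume(25, 3, 1234, 399) == 2.9    # well fed, > 200 trials: step down 0.1 uL
assert compute_adaptive_reward_volume(25, 3, 1234, 123) == 3.0    # well fed, <= 200 trials: unchanged
assert compute_adaptive_reward_volume(25, 2.3, 234, 123) == 2.4   # 0.234 mL < 25 g / 25: underfed, step up 0.1 uL
assert compute_adaptive_reward_volume(25, 3, 234, 123) == 3       # underfed but already at the 3 uL ceiling
assert compute_adaptive_reward_volume(25, 1.5, 1234, 423) == 1.5  # > 200 trials but already at the 1.5 uL floor
```
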

-def get_training_phase(subject):
+def get_subject_training_info(
+        subject_name, subject_weight_grams=None, task_name='_iblrig_tasks_trainingChoiceWorld',
+        default_reward=DEFAULT_REWARD_VOLUME, mode='silent', **kwargs):
     """
-    Goes throught the history of a subject and gets the latest training phase for this subject
-    :param subject:
-    :return:
+    Goes through the history of a subject and gets the latest
+    training phase and the adaptive reward volume for this subject
+    :param subject_name:
+    :param subject_weight_grams: current weight of the subject in grams; if not available, the previous session's weight is used
+    :param default_reward: default reward volume in uL if no previous session is available
+    :param task_name: name of the protocol to look for in the experiment description,
+     defaults to '_iblrig_tasks_trainingChoiceWorld'
+    :param mode: 'silent' or 'raise': if 'silent' returns default values when no history is found, if 'raise' raises ValueError
+    :param **kwargs: optional arguments to be passed to iblrig.path_helper.get_local_and_remote_paths;
+     if not used, will use the arguments from iblrig/settings/iblrig_settings.yaml
+    :return: training_phase (int), adaptive_reward in uL (float between 1.5 and 3) and status (True if a previous
+     session was found, False if not and default values were returned)
     """
-    DEFAULT_PHASE = 0
-    iblrig_settings = load_settings_yaml()
-    local_subjects_path = Path(iblrig_settings['iblrig_local_data_path']).joinpath(iblrig_settings['ALYX_LAB'], 'Subjects')
-    local = _get_latest_training_phase_from_folder(local_subjects_path.joinpath(subject)) or (DEFAULT_PHASE, '0000-00-00')
-    remote = (DEFAULT_PHASE, '0000-00-00')
-    if iblrig_settings['iblrig_remote_data_path'] is not None:
-        remote_subjects_path = Path(iblrig_settings['iblrig_remote_data_path']).joinpath('Subjects')
-        remote = _get_latest_training_phase_from_folder(remote_subjects_path.joinpath(subject)) or (DEFAULT_PHASE, '0000-00-00')
-    if remote[1] > local[1]:
-        return remote[0]
+    session_info = iterate_previous_sessions(subject_name, task_name=task_name, n=1, **kwargs)
+    if len(session_info) == 0:
+        if mode == 'silent':
+            logger.warning("The training status could not be determined, returning default values")
+            return DEFAULT_TRAINING_PHASE, default_reward, False
+        elif mode == 'raise':
+            raise ValueError("The training status could not be determined as no previous sessions were found")
+    else:
+        session_info = session_info[0]
+    trials_data, _ = iblrig.raw_data_loaders.load_task_jsonable(session_info.file_task_data)
+    previous_reward_volume = (session_info.task_settings.get('ADAPTIVE_REWARD_AMOUNT_UL') or
+                              session_info.task_settings.get('REWARD_AMOUNT_UL'))
+    adaptive_reward = compute_adaptive_reward_volume(
+        subject_weight_g=subject_weight_grams or session_info.task_settings['SUBJECT_WEIGHT'],
+        reward_volume_ul=previous_reward_volume,
+        delivered_volume_ul=trials_data['reward_amount'].sum(),
+        ntrials=trials_data.shape[0])
+    if 'training_phase' in trials_data:
+        training_phase = trials_data['training_phase'].values[-1]
     else:
-        return local[0]
+        training_phase = DEFAULT_TRAINING_PHASE
+    return training_phase, adaptive_reward, True


 def training_contrasts_probabilities(phase=1):
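
For reference, a usage sketch mirroring how the unit tests further down call this entry point; the subject name, weight, path and lab are placeholders:

```python
from pathlib import Path
from iblrig.choiceworld import get_subject_training_info

training_phase, adaptive_reward, found = get_subject_training_info(
    'example_mouse', subject_weight_grams=17,
    local_path=Path('/data/iblrig_data'), lab='cortexlab', mode='silent')
# with mode='silent', a subject without usable history yields
# (DEFAULT_TRAINING_PHASE, default_reward, False) instead of raising
```
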
diff --git a/iblrig/commands.py b/iblrig/commands.py
index d94a869e6..ad053d8fb 100644
--- a/iblrig/commands.py
+++ b/iblrig/commands.py
@@ -19,7 +19,7 @@
 def transfer_video_data(local_subjects_path=None, remote_subjects_path=None, dry=False):
-    local_subjects_path, remote_subjects_path = get_local_and_remote_paths(
-        local_subjects_path=local_subjects_path, remote_subjects_path=remote_subjects_path)
+    rig_paths = get_local_and_remote_paths(local_path=local_subjects_path, remote_path=remote_subjects_path)
+    local_subjects_path = rig_paths['local_subjects_folder']
+    remote_subjects_path = rig_paths['remote_subjects_folder']

     for flag in list(local_subjects_path.rglob('transfer_me.flag')):
         session_path = flag.parent
@@ -31,22 +31,22 @@ def transfer_video_data(local_subjects_path=None, remote_subjects_path=None, dry
-                          remote_subjects_path=remote_subjects_path, dry=dry, tag='video')
+                          remote_path=remote_subjects_path, dry=dry, tag='video')


-def transfer_data(local_subjects_path=None, remote_subjects_path=None, dry=False):
+def transfer_data(local_path=None, remote_path=None, dry=False):
     """
     Copies the behavior data from the rig to the local server if the session has more than 42 trials
     If the hardware settings file contains MAIN_SYNC=True, the number of expected devices is set to 1
+    :param local_path: local data path, defaults to the local data path from iblrig_settings.yaml
+    :param remote_path: remote data path, defaults to the remote data path from iblrig_settings.yaml
     :param dry:
     :return:
     """
-    local_subjects_path, remote_subjects_path = get_local_and_remote_paths(
-        local_subjects_path=local_subjects_path, remote_subjects_path=remote_subjects_path)
+    rig_paths = get_local_and_remote_paths(local_path=local_path, remote_path=remote_path)
     hardware_settings = load_settings_yaml('hardware_settings.yaml')
     number_of_expected_devices = 1 if hardware_settings.get('MAIN_SYNC', True) else None
-    for flag in list(local_subjects_path.rglob('transfer_me.flag')):
+    for flag in list(rig_paths['local_subjects_folder'].rglob('transfer_me.flag')):
         session_path = flag.parent
-        sc = BehaviorCopier(session_path, remote_subjects_folder=remote_subjects_path)
+        sc = BehaviorCopier(session_path, remote_subjects_folder=rig_paths['remote_subjects_folder'])
         task_settings = raw_data_loaders.load_settings(session_path, task_collection='raw_task_data_00')
         if task_settings is None:
             logger.info(f'skipping: no task settings found for {session_path}')
@@ -86,28 +86,27 @@ def transfer_data(local_path=None, remote_path=None, dry=False):
             logger.critical(f"{sc.state}, {sc.session_path}")
         sc.run(number_of_expected_devices=number_of_expected_devices)
     # once we copied the data, remove older sessions for which the data was successfully uploaded
-    remove_local_sessions(weeks=2, dry=dry, local_subjects_path=local_subjects_path, remote_subjects_path=remote_subjects_path)
+    remove_local_sessions(weeks=2, dry=dry, local_path=local_path, remote_path=remote_path)


-def remove_local_sessions(weeks=2, local_subjects_path=None, remote_subjects_path=None, dry=False, tag='behavior'):
+def remove_local_sessions(weeks=2, local_path=None, remote_path=None, dry=False, tag='behavior'):
     """
-    Remove local sessions older than 2 weeks
+    Remove local sessions older than the given number of weeks
     :param weeks:
     :param dry:
     :return:
     """
-    local_subjects_path, remote_subjects_path = get_local_and_remote_paths(
-        local_subjects_path=local_subjects_path, remote_subjects_path=remote_subjects_path)
+    rig_paths = get_local_and_remote_paths(local_path=local_path, remote_path=remote_path)
     size = 0
     match tag:
         case 'behavior':
             Copier = BehaviorCopier
         case 'video':
             Copier = VideoCopier
-    for flag in sorted(list(local_subjects_path.rglob(f'_ibl_experiment.description_{tag}.yaml')), reverse=True):
+    for flag in sorted(list(rig_paths['local_subjects_folder'].rglob(f'_ibl_experiment.description_{tag}.yaml')), reverse=True):
         session_path = flag.parent
         days_elapsed = (datetime.datetime.now() - datetime.datetime.strptime(session_path.parts[-2], '%Y-%m-%d')).days
         if days_elapsed < (weeks * 7):
             continue
-        sc = Copier(session_path, remote_subjects_folder=remote_subjects_path)
+        sc = Copier(session_path, remote_subjects_folder=rig_paths['remote_subjects_folder'])
         if sc.state == 3:
             session_size = sum(f.stat().st_size for f in session_path.rglob('*') if f.is_file()) / 1024 ** 3
             logger.info(f"{sc.session_path}, {session_size:0.02f} Go")
diff --git a/iblrig/gui/wizard.py b/iblrig/gui/wizard.py
index 182e0c6d7..a0845811b 100644
--- a/iblrig/gui/wizard.py
+++ b/iblrig/gui/wizard.py
@@ -264,15 +264,13 @@ def controls_for_extra_parameters(self):
         self.controller2model()
         self.task_arguments = dict()

-        #
-        args_general = sorted(_get_task_argument_parser()._actions, key=lambda x: x.dest)
-        args_general = [x for x in args_general
-                        if not any(set(x.option_strings).intersection(['--subject', '--user', '--projects',
-                                                                       '--log-level', '--procedures', '--weight',
-                                                                       '--help', '--append', '--no-interactive',
-                                                                       '--stub', '--wizard']))]
-        args_extra = sorted(self.model.get_task_extra_parser(self.model.task_name)._actions, key=lambda x: x.dest)
-        args = args_extra + args_general
+        # collect & filter list of parser arguments (general & task specific)
+        args = sorted(_get_task_argument_parser()._actions, key=lambda x: x.dest)
+        args = [x for x in args
+                if not any(set(x.option_strings).intersection(['--subject', '--user', '--projects', '--log-level',
+                                                               '--procedures', '--weight', '--help', '--append',
+                                                               '--no-interactive', '--stub', '--wizard']))]
+        args = sorted(self.model.get_task_extra_parser(self.model.task_name)._actions, key=lambda x: x.dest) + args

         group = self.uiGroupTaskParameters
         layout = group.layout()
@@ -282,10 +280,8 @@ def controls_for_extra_parameters(self):
             layout.removeRow(0)

         for idx, arg in enumerate(args):
-            label = arg.option_strings[0]
-            label = label.replace('_', ' ').replace('--', '').title()
-            label = label.replace('Id', 'ID')
-            param = arg.option_strings[0]
+            param = max(arg.option_strings, key=len)
+            label = param.replace('_', ' ').replace('--', '').title()

             # create widget for bool arguments
             if isinstance(arg, (argparse._StoreTrueAction, argparse._StoreFalseAction)):
@@ -293,7 +289,7 @@ def controls_for_extra_parameters(self):
                 widget.setTristate(False)
                 if arg.default:
                     widget.setCheckState(arg.default * 2)
-                widget.toggled.connect(lambda val, a=arg: self._set_task_arg(a.option_strings[0], val > 0))
+                widget.toggled.connect(lambda val, p=param: self._set_task_arg(p, val > 0))
                 widget.toggled.emit(widget.isChecked() > 0)

             # create widget for string arguments
@@ -325,22 +321,46 @@ def controls_for_extra_parameters(self):
                 if arg.default:
                     widget.setValue(arg.default)
                 widget.valueChanged.connect(
-                    lambda val, a=arg: self._set_task_arg(a.option_strings[0], str(val)))
+                    lambda val, p=param: self._set_task_arg(p, str(val)))
                 widget.valueChanged.emit(widget.value())

             # no other argument types supported for now
             else:
                 continue

+            # add custom widget properties
+            widget.setProperty('parameter_name', param)
+            widget.setProperty('parameter_dest', arg.dest)
+
             # display help strings as status tip
             if arg.help:
                 widget.setStatusTip(arg.help)

-            if label == 'Training Phase':
-                widget.setSpecialValueText('automatic')
-                widget.setMaximum(5)
-                widget.setMinimum(-1)
-                widget.setValue(-1)
+            # some customizations
+            match widget.property('parameter_dest'):
+                case 'session_template_id':
+                    label = 'Session Template ID'
+
+                case 'delay_secs':
+                    label = 'Initial Delay, s'
+
+                case 'training_phase':
+                    widget.setSpecialValueText('automatic')
+                    widget.setMaximum(5)
+                    widget.setMinimum(-1)
+                    widget.setValue(-1)
+
+                case 'adaptive_reward':
+                    label = 'Reward Amount, μl'
+                    widget.setSpecialValueText('automatic')
+                    widget.setMaximum(3)
+                    widget.setSingleStep(0.1)
+                    widget.setMinimum(-1)
+                    widget.setValue(widget.minimum())
+                    widget.valueChanged.connect(
+                        lambda val, a=arg, w=widget:
+                        self._set_task_arg(a.option_strings[0], str(val if val > w.minimum() else -1)))

             layout.addRow(self.tr(label), widget)
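
The `p=param` and `w=widget` default arguments in the connected lambdas matter: without them, every callback created in the loop would see the last value of the loop variable. A minimal, PyQt-free demonstration of the two binding behaviors:

```python
# late vs. early binding of loop variables in lambdas
callbacks_late, callbacks_bound = [], []
for param in ['--training_phase', '--adaptive_reward']:
    callbacks_late.append(lambda val: (param, val))        # late binding: `param` looked up at call time
    callbacks_bound.append(lambda val, p=param: (p, val))  # early binding: `p` frozen per iteration

assert [cb(1)[0] for cb in callbacks_late] == ['--adaptive_reward', '--adaptive_reward']
assert [cb(1)[0] for cb in callbacks_bound] == ['--training_phase', '--adaptive_reward']
```
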
diff --git a/iblrig/misc.py b/iblrig/misc.py
index 7e7d00db1..fed28ea86 100644
--- a/iblrig/misc.py
+++ b/iblrig/misc.py
@@ -9,11 +9,7 @@
 import datetime
 import json
 import logging
-import os
-import shutil
-import subprocess
 from pathlib import Path
-from sys import platform
 from typing import Optional, Union

 import numpy as np
@@ -86,50 +82,6 @@ def get_task_arguments(parents=None):
     return _post_parse_arguments(**kwargs)


-def call_exp_desc_gui():
-    """
-    Used to call the 'Experiment Description GUI' in the iblscripts repo from a task. Attempts to perform the following:
-      * parse alyx username from home directory alyx config file (production alyx)
-      * parses the subject name from pybpod's session user_settings file,
-        i.e. ../iblrig_params/IBL/experiments/_iblrig_tasks/setups/task_name/sessions/date_dir/user_settings.py
-      * uses subprocess to call the gui, i.e. iblscripts/deploy/project_procedure_gui/experiment_form.py
-
-    Better implementation is desired.
-    """
-    log.info("Attempting to launch experiment description form...")
-
-    # determine alyx_username
-    if platform == "win32":
-        alyx_prod_config_path = Path.home() / "AppData" / "Roaming" / ".one" / ".alyx.internationalbrainlab.org"
-    else:
-        alyx_prod_config_path = Path.home() / ".one" / ".alyx.internationalbrainlab.org"
-    with open(alyx_prod_config_path, "r") as f:
-        data = json.load(f)
-    alyx_username = data["ALYX_LOGIN"]
-    log.info(f"Alyx username set: {alyx_username}")
-
-    # determine currently selected subject in pybpod, hope that the user did not select multiple subjects
-    subject_name = None
-    if "user_settings.py" in os.listdir():
-        with open("user_settings.py", "r") as f:
-            lines = f.readlines()
-        for row in lines:
-            if "PYBPOD_SUBJECT_EXTRA" in row:
-                name_index = row.split().index('"name":')
-                subject_name = row.split()[name_index + 1].strip(",\"")
-                break
-    log.info(f"Subject name: {subject_name}")
-
-    if alyx_username and subject_name:
-        if platform == "win32":  # Set path for platform
-            experiment_form_path = Path("C:\\iblscripts\\deploy\\project_procedure_gui\\experiment_form.py")
-        else:
-            experiment_form_path = Path.home() / "Documents/repos/iblscripts/deploy/project_procedure_gui/experiment_form.py"
-        if experiment_form_path.exists():  # verify iblscripts dir exists in the expected location
-            cmd = ["python", experiment_form_path, subject_name, alyx_username]  # set subprocess command
-            subprocess.run(cmd)
-
-
 def _isdatetime(x: str) -> Optional[bool]:
     """
     Check if string is a date in the format YYYY-MM-DD.
@@ -160,33 +112,6 @@ def get_session_path(path: Union[str, Path]) -> Optional[Path]:
     return sess


-def check_transfer(src_session_path: str, dst_session_path: str):
-    """
-    Check all the files in the source directory match those in the destination directory.
-    :param src_session_path: The source directory that was copied
-    :param dst_session_path: The copy target directory
-    :return:
-    """
-    src_files = sorted([x for x in Path(src_session_path).rglob("*") if x.is_file()])
-    dst_files = sorted([x for x in Path(dst_session_path).rglob("*") if x.is_file()])
-    assert len(src_files) == len(dst_files), "Not all files transferred"
-    for s, d in zip(src_files, dst_files):
-        assert s.name == d.name, "file name mismatch"
-        assert s.stat().st_size == d.stat().st_size, "file size mismatch"
-
-
-def transfer_folder(src: Path, dst: Path, force: bool = False) -> None:
-    print(f"Attempting to copy:\n{src}\n--> {dst}")
-    if force:
-        print(f"Removing {dst}")
-        shutil.rmtree(dst, ignore_errors=True)
-    print(f"Copying all files:\n{src}\n--> {dst}")
-    shutil.copytree(src, dst)
-    # If folder was created delete the src_flag_file
-    if check_transfer(src, dst) is None:
-        print("All files copied")
-
-
 def smooth_rolling_window(x, window_len=11, window="blackman"):
     """
     Smooth the data using a window with requested size.
diff --git a/iblrig/path_helper.py b/iblrig/path_helper.py
index b990e6094..7d59b1edd 100644
--- a/iblrig/path_helper.py
+++ b/iblrig/path_helper.py
@@ -9,25 +9,104 @@
 import yaml
 from packaging import version
+import numpy as np

 from iblutil.util import Bunch

 import iblrig
+from ibllib.io import session_params

 log = logging.getLogger("iblrig")


+def iterate_previous_sessions(subject_name, task_name, n=1, **kwargs):
+    """
+    Iterates over the sessions of a given subject in both the remote and local paths
+    and searches for a given protocol name. Returns the information of the last n
+    matching protocols found, as a list of dictionaries
+    :param subject_name:
+    :param task_name: name of the protocol to look for in the experiment description, e.g. '_iblrig_tasks_trainingChoiceWorld'
+    :param n: maximum number of protocols to return
+    :param **kwargs: optional arguments to be passed to iblrig.path_helper.get_local_and_remote_paths;
+     if not used, will use the arguments from iblrig/settings/iblrig_settings.yaml
+    :return:
+     list of dictionaries with keys: session_stub, session_path, experiment_description, task_settings, file_task_data
+    """
+    rig_paths = get_local_and_remote_paths(**kwargs)
+    sessions = _iterate_protocols(
+        rig_paths.local_subjects_folder.joinpath(subject_name), task_name=task_name, n=n)
+    if rig_paths.remote_subjects_folder is not None:
+        remote_sessions = _iterate_protocols(
+            rig_paths.remote_subjects_folder.joinpath(subject_name), task_name=task_name, n=n)
+        sessions.extend(remote_sessions)
+    _, ises = np.unique([s['session_stub'] for s in sessions], return_index=True)
+    sessions = [sessions[i] for i in ises]
+    return sessions
+
+
+def _iterate_protocols(subject_folder, task_name, n=1):
+    """
+    Iterates over the sessions of a given subject and searches for a given protocol name.
+    Returns the information of the last n matching protocols found, as a list of dictionaries
+    :param subject_folder:
+    :param task_name: name of the protocol to look for in the experiment description, e.g. '_iblrig_tasks_trainingChoiceWorld'
+    :param n: maximum number of protocols to return
+    :return:
+     list of dictionaries with keys: session_stub, session_path, experiment_description, task_settings, file_task_data
+    """
+    protocols = []
+    if subject_folder is None or Path(subject_folder).exists() is False:
+        return protocols
+    for file_experiment in sorted(subject_folder.rglob('_ibl_experiment.description*.yaml'), reverse=True):
+        session_path = file_experiment.parent
+        ad = session_params.read_params(file_experiment)
+        if task_name not in ad['tasks'][0]:
+            continue
+        # reversed: we look for the last task first if the protocol ran twice
+        for ad_task in reversed(ad['tasks']):
+            adt = ad_task.get(task_name, None)
+            if not adt:
+                continue
+            task_settings = iblrig.raw_data_loaders.load_settings(session_path, collection=adt['collection'])
+            if task_settings.get('NTRIALS', 43) < 42:  # we consider that under 42 trials it is a dud session
+                continue
+            protocols.append(Bunch({
+                'session_stub': '_'.join(file_experiment.parent.parts[-2:]),  # 2019-01-01_001
+                'session_path': file_experiment.parent,
+                'task_collection': adt['collection'],
+                'experiment_description': ad,
+                'task_settings': task_settings,
+                'file_task_data': session_path.joinpath(adt['collection'], '_iblrig_taskData.raw.jsonable')
+            }))
+            if len(protocols) >= n:
+                return protocols
+    return protocols


+def get_local_and_remote_paths(local_path=None, remote_path=None, lab=None):
+    """
+    Function used to parse input arguments to transfer commands. If the arguments are None, reads in the settings
+    and returns the values from the files.
+    local_subjects_path always has a fallback on the home directory / iblrig_data
+    remote_subjects_path has no fallback and will return None when all options are exhausted
+    :param local_path:
+    :param remote_path:
+    :param lab:
+    :return: dictionary, with following keys (example output)
+       {'local_data_folder': PosixPath('C:/iblrigv8_data'),
+        'remote_data_folder': PosixPath('Y:/'),
+        'local_subjects_folder': PosixPath('C:/iblrigv8_data/mainenlab/Subjects'),
+        'remote_subjects_folder': PosixPath('Y:/Subjects')}
+    """
     iblrig_settings = load_settings_yaml()
-    local_subjects_path = local_subjects_path or Path(iblrig_settings['iblrig_local_data_path'])
-    remote_subjects_path = remote_subjects_path or Path(iblrig_settings['iblrig_remote_data_path']).joinpath('Subjects')
-    return local_subjects_path, remote_subjects_path
+    paths = Bunch({'local_data_folder': local_path, 'remote_data_folder': remote_path})
+    if paths.local_data_folder is None:
+        paths.local_data_folder = Path(p) if (p := iblrig_settings['iblrig_local_data_path']) \
+            else Path.home().joinpath('iblrig_data')
+    if paths.remote_data_folder is None:
+        paths.remote_data_folder = Path(p) if (p := iblrig_settings['iblrig_remote_data_path']) else None
+    paths.local_subjects_folder = Path(paths.local_data_folder).joinpath(lab or iblrig_settings['ALYX_LAB'] or '', 'Subjects')
+    paths.remote_subjects_folder = Path(p).joinpath('Subjects') if (p := paths.remote_data_folder) else None
+    return paths


 def load_settings_yaml(file_name='iblrig_settings.yaml', mode='raise'):
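
Fallback behavior in a nutshell, on a rig with a configured iblrig_settings.yaml (lab name and folders are illustrative):

```python
from iblrig.path_helper import get_local_and_remote_paths

paths = get_local_and_remote_paths(local_path=None, remote_path=None, lab='mainenlab')
# paths.local_data_folder      -> iblrig_settings.yaml value, else ~/iblrig_data
# paths.local_subjects_folder  -> <local_data_folder>/mainenlab/Subjects
# paths.remote_data_folder     -> iblrig_settings.yaml value, else None
# paths.remote_subjects_folder -> <remote_data_folder>/Subjects, or None when no remote is configured
```
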
diff --git a/iblrig/raw_data_loaders.py b/iblrig/raw_data_loaders.py
index 67f0da4e1..8f142afa1 100644
--- a/iblrig/raw_data_loaders.py
+++ b/iblrig/raw_data_loaders.py
@@ -374,7 +374,7 @@ def load_camera_gpio(session_path, label: str, as_dicts=False):
     return gpio


-def load_settings(session_path: Union[str, Path]):
+def load_settings(session_path: Union[str, Path], collection='raw_behavior_data'):
     """
     Load PyBpod Settings files (.json).
@@ -388,7 +388,7 @@ def load_settings(session_path: Union[str, Path]):
     if session_path is None:
         log.warning("No data loaded: session_path is None")
         return
-    path = Path(session_path).joinpath("raw_behavior_data")
+    path = Path(session_path).joinpath(collection)
     path = next(path.glob("_iblrig_taskSettings.raw*.json"), None)
     if not path:
         log.warning("No data loaded: could not find raw settings file")
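
The new `collection` argument lets callers read settings from dynamic-pipeline collections instead of the hard-coded folder; the session path below is a placeholder:

```python
from iblrig.raw_data_loaders import load_settings

# default behavior is unchanged: reads from 'raw_behavior_data'
settings = load_settings('/data/Subjects/SWC_043/2019-01-01/001')
# _iterate_protocols above passes the collection recorded in the experiment description
settings = load_settings('/data/Subjects/SWC_043/2019-01-01/001', collection='raw_task_data_00')
```
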
diff --git a/iblrig/test/tasks/test_biased_choice_world_family.py b/iblrig/test/tasks/test_biased_choice_world_family.py
index c9fe2fed3..b4e52ea24 100644
--- a/iblrig/test/tasks/test_biased_choice_world_family.py
+++ b/iblrig/test/tasks/test_biased_choice_world_family.py
@@ -18,7 +18,7 @@ def setUp(self) -> None:
         self.task = BiasedChoiceWorldSession(**TASK_KWARGS)
         np.random.seed(12345)

-    def test_task(self):
+    def test_task(self, reward_set=np.array([0, 1.5])):
         task = self.task
         task.create_session()
         trial_fixtures = get_fixtures()
@@ -31,10 +31,9 @@ def test_task(self):
             trial_type = np.random.choice(['correct', 'error', 'no_go'], p=[.9, .05, .05])
             task.trial_completed(trial_fixtures[trial_type])
             if trial_type == 'correct':
-                assert task.trials_table['trial_correct'][task.trial_num]
+                self.assertTrue(task.trials_table['trial_correct'][task.trial_num])
             else:
-                assert not task.trials_table['trial_correct'][task.trial_num]
-
+                self.assertFalse(task.trials_table['trial_correct'][task.trial_num])
             if i == 245:
                 task.show_trial_log()
             assert not np.isnan(task.reward_time)
@@ -53,7 +52,6 @@ def test_task(self):
             position=pd.NamedAgg(column="position", aggfunc=lambda x: 1 - (np.mean(np.sign(x)) + 1) / 2),
             first_trial=pd.NamedAgg(column="block_trial_num", aggfunc='first'),
         )
-
         # test that the first block is 90 trials
         assert df_blocks['count'].values[0] == 90
         # make sure all first block trials were reset to 0
@@ -64,6 +62,7 @@ def test_task(self):
         assert np.all(np.isclose(np.abs(np.diff(df_blocks['stim_probability_left'].values[1:])), 0.6))
         # assert that the trial outcomes are within 0.4 of the generating probability
         np.testing.assert_array_less(np.abs(df_blocks['position'] - df_blocks['stim_probability_left']), 0.4)
+        np.testing.assert_array_equal(np.unique(task.trials_table['reward_amount']), reward_set)

     def check_quiescent_period(self):
         """
@@ -89,7 +88,7 @@ def setUp(self) -> None:
         self.task = NeuroModulatorChoiceWorldSession(**TASK_KWARGS)

     def test_task(self):
-        super(TestNeuroModulatorBiasedChoiceWorld, self).test_task()
+        super(TestNeuroModulatorBiasedChoiceWorld, self).test_task(reward_set=np.array([0, 1., 1.5, 3.]))
         # we expect 10% of null feedback trials
         assert np.abs(.05 - np.mean(self.task.trials_table['omit_feedback'])) < .05
diff --git a/iblrig/test/tasks/test_training_choice_world.py b/iblrig/test/tasks/test_training_choice_world.py
index f0a7de51d..f79c014d4 100644
--- a/iblrig/test/tasks/test_training_choice_world.py
+++ b/iblrig/test/tasks/test_training_choice_world.py
@@ -12,22 +12,22 @@ def setUp(self) -> None:

     def test_task(self):
         trial_fixtures = get_fixtures()
+        ADAPTIVE_REWARD = 1.9
         nt = 800
         for training_phase in np.arange(6):
-            task = TrainingChoiceWorldSession(**TASK_KWARGS)
+            task = TrainingChoiceWorldSession(**TASK_KWARGS, adaptive_reward=ADAPTIVE_REWARD)
             task.training_phase = training_phase
             task.create_session()
-
             for i in np.arange(nt):
                 task.next_trial()
                 # pc = task.psychometric_curve()
                 trial_type = np.random.choice(['correct', 'error', 'no_go'], p=[.9, .05, .05])
                 task.trial_completed(trial_fixtures[trial_type])
                 if trial_type == 'correct':
-                    assert task.trials_table['trial_correct'][task.trial_num]
+                    self.assertTrue(task.trials_table['trial_correct'][task.trial_num])
+                    self.assertEqual(task.trials_table['reward_amount'][task.trial_num], ADAPTIVE_REWARD)
                 else:
                     assert not task.trials_table['trial_correct'][task.trial_num]
-
                 if i == 245:
                     task.show_trial_log()
                 assert not np.isnan(task.reward_time)
diff --git a/iblrig/test/test_choice_world.py b/iblrig/test/test_choice_world.py
new file mode 100644
index 000000000..50a268be8
--- /dev/null
+++ b/iblrig/test/test_choice_world.py
@@ -0,0 +1,154 @@
+"""
+Unit tests for task logic functions
+"""
+from pathlib import Path
+import unittest
+import copy
+import tempfile
+import shutil
+import json
+
+import numpy as np
+import pandas as pd
+
+from iblrig.test.base import TASK_KWARGS
+from iblrig import session_creator
+import iblrig.choiceworld
+from iblrig.path_helper import iterate_previous_sessions
+from iblrig.raw_data_loaders import load_task_jsonable
+
+from iblrig_tasks._iblrig_tasks_trainingChoiceWorld.task import Session as TrainingChoiceWorldSession
+from iblrig_tasks._iblrig_tasks_passiveChoiceWorld.task import Session as PassiveChoiceWorldSession
+from iblrig_tasks._iblrig_tasks_spontaneous.task import Session as SpontaneousSession
+
+
+class TestGetPreviousSession(unittest.TestCase):
+
+    def setUp(self) -> None:
+        self.kwargs = copy.deepcopy(TASK_KWARGS)
+        self.kwargs.update({'subject_weight_grams': 25})
+        self.td = tempfile.TemporaryDirectory()
+        self.root_path = Path(self.td.name)
+        self.kwargs['iblrig_settings'] = dict(iblrig_local_data_path=self.root_path, ALYX_LAB='cortexlab')
+        self.sesa = SpontaneousSession(**self.kwargs)
+        self.sesa.create_session()
+        self.sesb = TrainingChoiceWorldSession(**self.kwargs)
+        # we make sure that the session has more than 42 trials in the settings; here sesd
+        # is not returned as it is a dud with no trial, so we expect 1 session in history: sesb
+        self.sesb.session_info['NTRIALS'] = 400
+        self.sesb.create_session()
+        self.sesc = PassiveChoiceWorldSession(**self.kwargs)
+        self.sesc.create_session()
+        self.sesd = TrainingChoiceWorldSession(**self.kwargs)
+        self.sesd.create_session()
+
+    def test_iterate_previous_sessions(self):
+        previous_sessions = iterate_previous_sessions(
+            self.kwargs['subject'], task_name='_iblrig_tasks_trainingChoiceWorld',
+            local_path=Path(self.root_path), lab='cortexlab', n=2)
+        self.assertEqual(len(previous_sessions), 1)
+        # here we create a remote path and copy over the sessions;
+        # then sesb is removed from the local server and sesd gets completed
+        # we expect sesb from the remote server and sesd from the local server in history
+        with tempfile.TemporaryDirectory() as tdd:
+            shutil.copytree(self.root_path.joinpath('cortexlab'), tdd, dirs_exist_ok=True)
+            shutil.rmtree(self.sesb.paths['SESSION_FOLDER'])
+            self.sesd.session_info['NTRIALS'] = 400
+            self.sesd.save_task_parameters_to_json_file()
+            previous_sessions = iterate_previous_sessions(
+                self.kwargs['subject'], task_name='_iblrig_tasks_trainingChoiceWorld',
+                local_path=self.root_path, remote_path=Path(tdd), lab='cortexlab', n=2)
+            # we expect 2 sessions, one from the local data path and one from the remote
+            self.assertEqual(len(previous_sessions), 2)
+            self.assertEqual(len(set([ps['session_path'].parents[3] for ps in previous_sessions])), 2)
+
+    @staticmethod
+    def mock_jsonable(file_path, training_phase=3, reward_amount=None):
+        file_fixtures = Path(__file__).parent.joinpath('fixtures', 'task_data_short.jsonable')
+        trials_table, bpod_data = load_task_jsonable(file_fixtures)
+        trials_table['training_phase'] = training_phase
+        if file_path.exists():
+            file_path.unlink()
+        if reward_amount:
+            trials_table['reward_amount'] = reward_amount / trials_table.shape[0]
+        for i, trial in trials_table.iterrows():
+            save_dict = trial.to_dict()
+            save_dict["behavior_data"] = bpod_data[i]
+            with open(file_path, 'a') as fp:
+                fp.write(json.dumps(save_dict) + '\n')
+
+    def test_adaptive_training_level(self):
+        """
+        Makes sure that when we create new sessions, the statuses are recovered properly from previous data
+        :return:
+        """
+        self.mock_jsonable(self.sesb.paths.DATA_FILE_PATH, training_phase=2, reward_amount=1050)
+        self.sesb.session_info['ADAPTIVE_REWARD_AMOUNT_UL'] = 2.1
+        self.sesb.save_task_parameters_to_json_file()
+        # test the function entry point
+        result = iblrig.choiceworld.get_subject_training_info(
+            self.kwargs['subject'], subject_weight_grams=17, local_path=Path(self.root_path), lab='cortexlab', mode='raise')
+        self.assertEqual((2, 2.1, True), result)
+
+        # test the task instantiation, should be the same as above
+        t = TrainingChoiceWorldSession(**self.kwargs, training_phase=4, adaptive_reward=2.9)
+        result = (t.training_phase, t.session_info["ADAPTIVE_REWARD_AMOUNT_UL"])
+        self.assertEqual((4, 2.9), result)
+        # using the method we should get the same as above
+        self.assertEqual(t.get_subject_training_info(), (2, 2.1))
+
+        # now the mouse is underfed
+        self.mock_jsonable(self.sesb.paths.DATA_FILE_PATH, training_phase=1, reward_amount=500)
+        result = t.get_subject_training_info()
+        self.assertEqual((1, 2.2), result)
+
+    def tearDown(self) -> None:
+        self.td.cleanup()
+
+
+class TestAdaptiveReward(unittest.TestCase):
+
+    def test_adaptive_reward(self):
+        fixture = (
+            ((25, 3, 1234, 399), 2.9),
+            ((25, 3, 1234, 123), 3.0),
+            ((25, 2.3, 234, 123), 2.4),
+            ((25, 3, 234, 123), 3),
+            ((25, 1.5, 1234, 423), 1.5),
+        )
+
+        for args, expected in fixture:
+            with self.subTest(args=args):
+                self.assertEqual(expected, iblrig.choiceworld.compute_adaptive_reward_volume(*args))
+
+
+class TestsBiasedBlocksGeneration(unittest.TestCase):
+
+    @staticmethod
+    def count_contrasts(pc):
+        df = pd.DataFrame(data=pc, columns=['angle', 'contrast', 'proba'])
+        df['signed_contrasts'] = df['contrast'] * np.sign(df['angle'])
+        c = df.groupby('signed_contrasts')['signed_contrasts'].count() / pc.shape[0]
+        return c.values
+
+    def test_default(self):
+        np.random.seed(7816)
+        # the default generation has a bias on the 0-contrast
+        pc, lb = session_creator.make_ephysCW_pc()
+        c = self.count_contrasts(pc)
+        assert np.all(np.abs(1 - c * 9) <= 0.2)
+
+    def test_biased(self):
+        # test biased, signed contrasts are uniform
+        np.random.seed(7816)
+        pc, lb = session_creator.make_ephysCW_pc(prob_type='biased')
+        c = self.count_contrasts(pc)
+        assert np.all(np.abs(1 - c * 9) <= 0.2)
+
+    def test_uniform(self):
+        # test uniform: signed contrasts are twice as likely for the 0 sample
+        pc, lb = session_creator.make_ephysCW_pc(prob_type='uniform')
+        c = self.count_contrasts(pc)
+        c[4] /= 2
+        assert np.all(np.abs(1 - c * 10) <= 0.2)
diff --git a/iblrig/test/test_choice_world_logic.py b/iblrig/test/test_choice_world_logic.py
deleted file mode 100644
index bb66ef987..000000000
--- a/iblrig/test/test_choice_world_logic.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""
-Unit tests for task logic functions
-"""
-import unittest
-
-import numpy as np
-import pandas as pd
-
-from iblrig import session_creator
-
-
-class TestsBiasedBlocksGeneration(unittest.TestCase):
-
-    @staticmethod
-    def count_contrasts(pc):
-        df = pd.DataFrame(data=pc, columns=['angle', 'contrast', 'proba'])
-        df['signed_contrasts'] = df['contrast'] * np.sign(df['angle'])
-        c = df.groupby('signed_contrasts')['signed_contrasts'].count() / pc.shape[0]
-        return c.values
-
-    def test_default(self):
-        np.random.seed(7816)
-        # the default generation has a bias on the 0-contrast
-        pc, lb = session_creator.make_ephysCW_pc()
-        c = self.count_contrasts(pc)
-        assert np.all(np.abs(1 - c * 9) <= 0.2)
-
-    def test_biased(self):
-        # test biased, signed contrasts are uniform
-        np.random.seed(7816)
-        pc, lb = session_creator.make_ephysCW_pc(prob_type='biased')
-        c = self.count_contrasts(pc)
-        assert np.all(np.abs(1 - c * 9) <= 0.2)
-
-    def test_uniform(self):
-        # test uniform: signed contrasts are twice as likely for the 0 sample
-        pc, lb = session_creator.make_ephysCW_pc(prob_type='uniform')
-        c = self.count_contrasts(pc)
-        c[4] /= 2
-        assert np.all(np.abs(1 - c * 10) <= 0.2)
diff --git a/iblrig/test/test_gui.py b/iblrig/test/test_gui.py
index edcd83ceb..8b72e7a75 100644
--- a/iblrig/test/test_gui.py
+++ b/iblrig/test/test_gui.py
@@ -25,10 +25,13 @@ def test_get_task_extra_kwargs(self):
             parser = self.wizard.get_task_extra_parser(task_name)
             extra_args = [{act.option_strings[0]: act.type} for act in parser._actions]
             match task_name:
-                case '_iblrig_tasks_ephysChoiceWorld' | '_iblrig_tasks_trainingChoiceWorld':
-                    assert len(extra_args) == 2
+                case '_iblrig_tasks_trainingChoiceWorld':
+                    expect = 3
+                case '_iblrig_tasks_ephysChoiceWorld':
+                    expect = 2
                 case '_iblrig_tasks_spontaneous' | 'plau_oddBallAudio':
-                    assert len(extra_args) == 0
+                    expect = 0
                 case _:
                     print(task_name)
-                    assert len(extra_args) == 1
+                    expect = 1
+            self.assertEqual(expect, len(extra_args))
diff --git a/iblrig/test/test_transfers.py b/iblrig/test/test_transfers.py
index 803eee640..ddbab39fe 100644
--- a/iblrig/test/test_transfers.py
+++ b/iblrig/test/test_transfers.py
@@ -57,8 +57,8 @@ def test_behavior_copy_complete_session(self):
         with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as td:
             session = _create_behavior_session(td, ntrials=50, hard_crash=hard_crash)
             session.paths.SESSION_FOLDER.joinpath('transfer_me.flag').touch()
-            iblrig.commands.transfer_data(local_subjects_path=session.paths.LOCAL_SUBJECT_FOLDER,
-                                          remote_subjects_path=session.paths.REMOTE_SUBJECT_FOLDER)
+            iblrig.commands.transfer_data(local_path=session.iblrig_settings['iblrig_local_data_path'],
+                                          remote_path=session.iblrig_settings['iblrig_remote_data_path'])
             sc = BehaviorCopier(session_path=session.paths.SESSION_FOLDER,
                                 remote_subjects_folder=session.paths.REMOTE_SUBJECT_FOLDER)
             self.assertEqual(sc.state, 3)
@@ -73,10 +73,8 @@ def test_behavior_do_not_copy_dummy_sessions(self):
             with tempfile.TemporaryDirectory() as td:
                 session = _create_behavior_session(td, ntrials=ntrials)
                 session.paths.SESSION_FOLDER.joinpath('transfer_me.flag').touch()
-                iblrig.commands.transfer_data(
-                    local_subjects_path=session.paths.LOCAL_SUBJECT_FOLDER,
-                    remote_subjects_path=session.paths.REMOTE_SUBJECT_FOLDER
-                )
+                iblrig.commands.transfer_data(local_path=session.iblrig_settings['iblrig_local_data_path'],
+                                              remote_path=session.iblrig_settings['iblrig_remote_data_path'])
                 sc = BehaviorCopier(
                     session_path=session.paths.SESSION_FOLDER,
                     remote_subjects_folder=session.paths.REMOTE_SUBJECT_FOLDER)
diff --git a/iblrig/version_management.py b/iblrig/version_management.py
index 04074bac3..9a4d3b036 100644
--- a/iblrig/version_management.py
+++ b/iblrig/version_management.py
@@ -119,7 +119,6 @@ def get_detailed_version_string(v_basic: str) -> str:
     This method will only work with installations managed through Git.
     """
-    # this method will only work with installations managed through git
     if not IS_GIT:
         log.error('This installation of IBLRIG is not managed through git.')
         return v_basic
@@ -148,6 +147,22 @@ def get_detailed_version_string(v_basic: str) -> str:

 @static_vars(branch=None)
 def get_branch() -> Union[str, None]:
+    """
+    Get the Git branch of the iblrig installation.
+
+    This function retrieves and caches the Git branch of the iblrig installation.
+    If the branch is already cached, it returns the cached value. If not, it
+    attempts to obtain the branch from the Git repository.
+
+    Returns
+    -------
+    Union[str, None]
+        The Git branch of the iblrig installation, or None if it cannot be determined.
+
+    Notes
+    -----
+    This method will only work with installations managed through Git.
+    """
     if get_branch.branch:
         return get_branch.branch
     if not IS_GIT:
@@ -162,6 +177,21 @@ def get_branch() -> Union[str, None]:

 @static_vars(is_fetched_already=False)
 def get_remote_tags() -> None:
+    """
+    Fetch remote Git tags if not already fetched.
+
+    This function fetches remote Git tags if they have not been fetched already.
+    If tags are already fetched, it does nothing. If the installation is not
+    managed through Git, it logs an error.
+
+    Returns
+    -------
+    None
+
+    Notes
+    -----
+    This method will only work with installations managed through Git.
+    """
     if get_remote_tags.is_fetched_already:
         return
     if not IS_GIT:
@@ -175,6 +205,24 @@ def get_remote_tags() -> None:

 @static_vars(changelog=None)
 def get_changelog() -> str:
+    """
+    Retrieve the changelog for the iblrig installation.
+
+    This function retrieves and caches the changelog for the iblrig installation
+    based on the current Git branch. If the changelog is already cached, it
+    returns the cached value. If not, it attempts to fetch the changelog from
+    the GitHub repository or read it locally if the remote fetch fails.
+
+    Returns
+    -------
+    str
+        The changelog for the iblrig installation.
+
+    Notes
+    -----
+    This method relies on the presence of a CHANGELOG.md file either in the
+    repository or locally.
+    """
     if get_changelog.changelog:
         return get_changelog.changelog
     try:
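
The `@static_vars` decorator used above attaches mutable attributes to the function object for cheap memoization. Its definition is not part of this diff; a minimal implementation consistent with the `get_branch.branch` access pattern could look like this (an assumption about iblrig's actual helper, not a copy of it):

```python
def static_vars(**kwargs):
    """Attach the keyword arguments as attributes on the decorated function."""
    def decorate(func):
        for name, value in kwargs.items():
            setattr(func, name, value)
        return func
    return decorate


@static_vars(branch=None)
def get_branch():
    if get_branch.branch is None:
        get_branch.branch = 'iblrigv8'  # placeholder for the actual `git rev-parse --abbrev-ref HEAD` call
    return get_branch.branch


assert get_branch() == 'iblrigv8'  # subsequent calls return the cached attribute
```
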
+ """ if get_changelog.changelog: return get_changelog.changelog try: diff --git a/iblrig_tasks/_iblrig_tasks_ephysChoiceWorld/task.py b/iblrig_tasks/_iblrig_tasks_ephysChoiceWorld/task.py index 7cd44b6ad..937cfb5c9 100644 --- a/iblrig_tasks/_iblrig_tasks_ephysChoiceWorld/task.py +++ b/iblrig_tasks/_iblrig_tasks_ephysChoiceWorld/task.py @@ -32,7 +32,7 @@ def extra_parser(): """ :return: argparse.parser() """ parser = super(Session, Session).extra_parser() parser.add_argument('--session_template_id', option_strings=['--session_template_id'], - dest='session_template_id', default=0, type=int) + dest='session_template_id', default=0, type=int, help='pre-generated session index (zero-based)') return parser diff --git a/iblrig_tasks/_iblrig_tasks_trainingChoiceWorld/task.py b/iblrig_tasks/_iblrig_tasks_trainingChoiceWorld/task.py index 5cd225fb4..77e4fdc34 100644 --- a/iblrig_tasks/_iblrig_tasks_trainingChoiceWorld/task.py +++ b/iblrig_tasks/_iblrig_tasks_trainingChoiceWorld/task.py @@ -2,6 +2,7 @@ import iblrig.misc TRAINING_PHASE = -1 +ADAPTIVE_REWARD = -1.0 class Session(TrainingChoiceWorldSession): @@ -12,7 +13,11 @@ def extra_parser(): """ :return: argparse.parser() """ parser = super(Session, Session).extra_parser() parser.add_argument('--training_phase', option_strings=['--training_phase'], - dest='training_phase', default=TRAINING_PHASE, type=int) + dest='training_phase', default=TRAINING_PHASE, type=int, + help='defines the set of contrasts presented to the subject') + parser.add_argument('--adaptive_reward', option_strings=['--adaptive_reward'], + dest='adaptive_reward', default=ADAPTIVE_REWARD, type=float, + help='reward volume in microliters') return parser diff --git a/iblrig_tasks/_iblrig_tasks_trainingChoiceWorld/task_parameters.yaml b/iblrig_tasks/_iblrig_tasks_trainingChoiceWorld/task_parameters.yaml index 062a66f33..0d171876e 100644 --- a/iblrig_tasks/_iblrig_tasks_trainingChoiceWorld/task_parameters.yaml +++ b/iblrig_tasks/_iblrig_tasks_trainingChoiceWorld/task_parameters.yaml @@ -1,8 +1,8 @@ 'ADAPTIVE_GAIN': True -'ADAPTIVE_REWARD': False +'ADAPTIVE_REWARD': True 'AG_INIT_VALUE': 8.0 # Adaptive Gain init value (azimuth_degree/mm) 'AG_MIN_VALUE': 4.0 # (azimuth_degree/mm) 'CONTRAST_SET': [1.0, 0.5, 0.25, 0.125, 0.0625, 0.0] 'CONTRAST_SET_PROBABILITY_TYPE': skew_zero # uniform, skew_zero 'DEBIAS': True # Whether to use debiasing rule or not by repeating error trials -'REWARD_AMOUNT_UL': 3.0 # Reward amount (uL) +'REWARD_AMOUNT_UL': 3.0 # Reward amount (uL), will oscillate between 1.5 and 3 uL depending on previous sessions if adaptive_reward is True diff --git a/scripts/move_passive.py b/scripts/move_passive.py deleted file mode 100644 index db9edcbba..000000000 --- a/scripts/move_passive.py +++ /dev/null @@ -1,45 +0,0 @@ -import logging -from pathlib import Path - -import iblrig.misc as misc -import iblrig.raw_data_loaders as raw -from iblrig import path_helper - -log = logging.getLogger("iblrig") - -IBLRIG_DATA_PATH = path_helper.get_iblrig_local_data_path() - - -def main(): - passive_sessions = list(IBLRIG_DATA_PATH.rglob("passive_data_for_ephys.flag")) - - # For each passive session found look into passiveSettings to find ephysSession name - # search for the ephys session in the rglobbed ephys sessions - # If you find it just rename and move the folder raw_behavior_data -> raw_passive_data, - # If no find search specifically for that session from the metadata and try to copy the folder - # If folder exists throw an error - log.info(f"Found {len(passive_sessions)} sessions 
in {IBLRIG_DATA_PATH}") - for ps in passive_sessions: - try: - sett = raw.load_settings(str(ps.parent)) - esess = sett["CORRESPONDING_EPHYS_SESSION"] - if not esess or esess is None: - log.warning("Corresponding ephys session NOT FOUND in settings - data not moved") - return - if not Path(esess).exists(): - log.warning(f"Ephys session {esess}: NOT FOUND on filesystem - data not moved") - return - # Fails if dst_folder exists! - misc.transfer_folder( - str(ps.parent / "raw_behavior_data"), - str(Path(esess) / "raw_passive_data"), - force=False, - ) - log.info(f"Moved passive data to {esess}") - ps.unlink() - except BaseException as e: - log.warning(f"{e}\n Failed to move passive session {ps.parent}") - - -if __name__ == "__main__": - main()