Commit
Merge pull request #511 from int-brain-lab/iblrigv8dev
Iblrigv8dev
bimac authored Sep 27, 2023
2 parents a1111b0 + b727683 commit 85cd1a3
Showing 22 changed files with 521 additions and 329 deletions.
7 changes: 0 additions & 7 deletions .github/workflows/main.yaml
@@ -46,13 +46,6 @@ jobs:
if: matrix.os == 'ubuntu-latest'
run: sudo apt-get install -y libportaudio2

- name: Install Bonsai (Windows only)
if: matrix.os == 'windows-latest'
shell: pwsh -l {0}
run: |
cd Bonsai
powershell.exe .\install.ps1
- name: Create config files (Ubuntu)
if: matrix.os == 'ubuntu-latest'
run: |
4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -2,6 +2,10 @@ Changelog
---------

-------------------------------
8.10.0
------
* adaptive reward from previous sessions in TrainingChoiceWorld
* updater: fetch remote changelog to advertise new features

8.9.4
-----
9 changes: 6 additions & 3 deletions iblrig/__init__.py
@@ -1,7 +1,10 @@
# PLEASE REMEMBER TO:
# 1) update CHANGELOG.md
# 2) git tag the release in accordance to the version number below (after merge!)
__version__ = '8.9.4'
# 1) update CHANGELOG.md including changes from the last tag
# 2) Pull request to iblrigv8dev
# 3) Check CI and, if needed, a wet-lab test
# 4) Pull request to iblrigv8
# 5) git tag the release in accordance with the version number below (after merge!)
__version__ = '8.10.0'

# The following method call will try to get post-release information (i.e. the number of commits since the last tagged
# release corresponding to the one above), plus information about the state of the local repository (dirty/broken)
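
As an aside on the comment above: post-release information of this kind is commonly derived from git metadata. The snippet below is an editorial sketch only (not code from this commit) of how the number of commits since the last tag and the dirty state could be obtained; it assumes git is available on the PATH and that releases are tagged as described in the checklist.

import subprocess

def describe_post_release() -> str:
    # e.g. '8.10.0-3-g85cd1a3-dirty': 3 commits after tag 8.10.0, at commit 85cd1a3, with local changes
    return subprocess.check_output(['git', 'describe', '--tags', '--dirty'], text=True).strip()
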
86 changes: 66 additions & 20 deletions iblrig/base_choice_world.py
@@ -375,6 +375,10 @@ def get_state_machine_trial(self, i):
def next_trial(self):
pass

@property
def reward_amount(self):
return self.task_params.REWARD_AMOUNT_UL

def draw_next_trial_info(self, pleft=0.5, contrast=None, position=None):
contrast = contrast or misc.draw_contrast(self.task_params.CONTRAST_SET, self.task_params.CONTRAST_SET_PROBABILITY_TYPE)
assert len(self.task_params.STIM_POSITIONS) == 2, "Only two positions are supported"
@@ -389,7 +393,7 @@ def draw_next_trial_info(self, pleft=0.5, contrast=None, position=None):
self.trials_table.at[self.trial_num, 'stim_freq'] = self.task_params.STIM_FREQ
self.trials_table.at[self.trial_num, 'trial_num'] = self.trial_num
self.trials_table.at[self.trial_num, 'position'] = position
self.trials_table.at[self.trial_num, 'reward_amount'] = self.task_params.REWARD_AMOUNT_UL
self.trials_table.at[self.trial_num, 'reward_amount'] = self.reward_amount
self.trials_table.at[self.trial_num, 'stim_probability_left'] = pleft
self.send_trial_info_to_bonsai()

@@ -559,7 +563,18 @@ def get_state_machine_trial(self, i):


class ActiveChoiceWorldSession(ChoiceWorldSession):

"""
The ActiveChoiceWorldSession is a base class for protocols where the mouse is actively making decisions
by turning the wheel. It has the following characteristics:
- it is trial based
- it is decision based
- left and right stimuli are equiprobable: there is no biased block
- a trial can either be correct / error / no_go depending on the side of the stimulus and the response
- it has a quantifiable performance, computed as the proportion of correct trials (unlike passive
stimulation or habituation protocols)
TrainingChoiceWorld and BiasedChoiceWorld are both subclasses of this class
"""
def __init__(self, **kwargs):
super(ActiveChoiceWorldSession, self).__init__(**kwargs)
self.trials_table['stim_probability_left'] = np.zeros(NTRIALS_INIT, dtype=np.float32)
@@ -620,6 +635,10 @@ def trial_completed(self, bpod_data):


class BiasedChoiceWorldSession(ActiveChoiceWorldSession):
"""
Biased choice world session is the instantiation of ActiveChoiceWorld where the notion of biased
blocks is introduced.
"""
protocol_name = "_iblrig_tasks_biasedChoiceWorld"

def __init__(self, **kwargs):
@@ -687,36 +706,63 @@ def show_trial_log(self):


class TrainingChoiceWorldSession(ActiveChoiceWorldSession):
"""
The TrainingChoiceWorldSession corresponds to the first training protocol of the choice world task.
This protocol implements an adaptive scheme for both the number of contrasts (embodied by the training_phase
property) and the reward amount (embodied by the adaptive_reward property).
"""
protocol_name = "_iblrig_tasks_trainingChoiceWorld"

def __init__(self, training_phase=-1, **kwargs):
def __init__(self, training_phase=-1, adaptive_reward=-1.0, **kwargs):
super(TrainingChoiceWorldSession, self).__init__(**kwargs)
from iblrig.choiceworld import get_training_phase
inferred_training_phase, inferred_adaptive_reward = self.get_subject_training_info()
if training_phase == -1:
try:
training_phase = get_training_phase(self.session_info.SUBJECT_NAME)
self.logger.warning(f"Got training phase: {training_phase}")
except Exception as ex:
self.logger.debug('Failed to get training phase: %s', ex)
if self.interactive:
training_phase = iblrig.graphic.numinput(
"Subject training phase", "Subject training phase: (0-5)",
askint=True, nullable=False, default=0, minval=0, maxval=5)
else:
self.logger.warning(f"Could not get training phase from Alyx: {traceback.format_exc()}, please set it"
f"manually in ./iblrig_tasks/_iblrig_tasks_trainingChoiceWorld/task.py"
f"training phase is set 0 for this session")
training_phase = 0
self.logger.critical(f"Got training phase: {inferred_training_phase}")
self.training_phase = inferred_training_phase
else:
self.logger.warning(f"Training phase manually set to: {training_phase}")
self.training_phase = training_phase
self.logger.critical(f"Training phase manually set to: {training_phase}")
self.training_phase = training_phase
if adaptive_reward == -1:
self.logger.critical(f"Got Adaptive reward {inferred_adaptive_reward} uL")
self.session_info["ADAPTIVE_REWARD_AMOUNT_UL"] = inferred_adaptive_reward
else:
self.logger.critical(f"Adaptive reward manually set to {adaptive_reward} uL")
self.session_info["ADAPTIVE_REWARD_AMOUNT_UL"] = adaptive_reward
self.var = {
"training_phase_trial_counts": np.zeros(6),
"last_10_responses_sides": np.zeros(10),
}
self.trials_table['training_phase'] = np.zeros(NTRIALS_INIT, dtype=np.int8)
self.trials_table['debias_trial'] = np.zeros(NTRIALS_INIT, dtype=bool)

@property
def reward_amount(self):
return self.session_info.get("ADAPTIVE_REWARD_AMOUNT_UL", self.task_params.REWARD_AMOUNT_UL)

def get_subject_training_info(self):
"""
Get the information from the subject's previous sessions, according to this session's parameters,
and deduce the training level and adaptive reward amount.
:return: training_phase (int), adaptive_reward (float, uL)
"""
try:
training_phase, adaptive_reward, _ = choiceworld.get_subject_training_info(
subject_name=self.session_info.SUBJECT_NAME,
subject_weight_grams=self.session_info['SUBJECT_WEIGHT'],
default_reward=self.task_params.REWARD_AMOUNT_UL,
local_path=self.iblrig_settings['iblrig_local_data_path'],
remote_path=self.iblrig_settings['iblrig_remote_data_path'],
lab=self.iblrig_settings['ALYX_LAB'],
task_name=self.protocol_name,
)
except Exception:
self.logger.critical('Failed to get training information from previous subjects: %s', traceback.format_exc())
training_phase, adaptive_reward = (
iblrig.choiceworld.DEFAULT_TRAINING_PHASE, iblrig.choiceworld.DEFAULT_REWARD_VOLUME)
self.logger.critical(f'The mouse will train on level {training_phase} and with reward {adaptive_reward} uL')

return training_phase, adaptive_reward

def compute_performance(self):
"""
Aggregates the trials table to compute the performance of the mouse on each contrast
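
To illustrate the new constructor arguments above (an editorial sketch, not part of the diff): leaving training_phase and adaptive_reward at their sentinel defaults (-1 and -1.0) makes the session infer both values from the subject's previous sessions via get_subject_training_info(), while explicit values override the inference. The task module path and the instantiation keywords below are assumptions for the example.

from iblrig_tasks._iblrig_tasks_trainingChoiceWorld.task import Session  # hypothetical import path

# infer training phase and adaptive reward from the subject's history
session = Session(subject='SWC_043')
# or override both manually (logged at critical level)
session = Session(subject='SWC_043', training_phase=3, adaptive_reward=2.5)
# reward_amount falls back to task_params.REWARD_AMOUNT_UL when no adaptive value is set
print(session.training_phase, session.reward_amount)
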
41 changes: 10 additions & 31 deletions iblrig/base_tasks.py
@@ -86,13 +86,10 @@ def __init__(self, subject=None, task_parameter_file=None, file_hardware_setting
self.iblrig_settings = iblrig.path_helper.load_settings_yaml(file_iblrig_settings or 'iblrig_settings.yaml')
if iblrig_settings is not None:
self.iblrig_settings.update(iblrig_settings)
if self.iblrig_settings['iblrig_local_data_path'] is None:
self.iblrig_settings['iblrig_local_data_path'] = Path.home().joinpath('iblrig_data')
else:
self.iblrig_settings['iblrig_local_data_path'] = Path(self.iblrig_settings['iblrig_local_data_path'])
# Load the tasks settings, from the task folder or override with the input argument
task_parameter_file = task_parameter_file or Path(inspect.getfile(self.__class__)).parent.joinpath('task_parameters.yaml')
self.task_params = Bunch({})
self.wizard = wizard

# first loads the base parameters for a given task
if self.base_parameters_file is not None and self.base_parameters_file.exists():
@@ -150,17 +147,18 @@ def _init_paths(self, append: bool = False):
DATA_FILE_PATH: contains the bpod trials
>>> C:\iblrigv8_data\mainenlab\Subjects\SWC_043\2019-01-01\001\raw_task_data_00\_iblrig_taskData.raw.jsonable # noqa
"""
rig_computer_paths = iblrig.path_helper.get_local_and_remote_paths(
local_path=self.iblrig_settings['iblrig_local_data_path'],
remote_path=self.iblrig_settings['iblrig_remote_data_path'],
lab=self.iblrig_settings['ALYX_LAB']
)
paths = Bunch({'IBLRIG_FOLDER': Path(iblrig.__file__).parents[1]})
paths.BONSAI = paths.IBLRIG_FOLDER.joinpath('Bonsai', 'Bonsai.exe')
paths.VISUAL_STIM_FOLDER = paths.IBLRIG_FOLDER.joinpath('visual_stim')
paths.LOCAL_SUBJECT_FOLDER = self.iblrig_settings['iblrig_local_data_path'].joinpath(
self.iblrig_settings['ALYX_LAB'] or '', 'Subjects')
paths.REMOTE_SUBJECT_FOLDER = (Path(self.iblrig_settings['iblrig_remote_data_path']).joinpath('Subjects')
if self.iblrig_settings['iblrig_remote_data_path'] else None)
paths.LOCAL_SUBJECT_FOLDER = rig_computer_paths['local_subjects_folder']
paths.REMOTE_SUBJECT_FOLDER = rig_computer_paths['remote_subjects_folder']
# initialize the session path
date_folder = self.iblrig_settings['iblrig_local_data_path'].joinpath(
self.iblrig_settings['ALYX_LAB'] or '',
'Subjects',
date_folder = paths.LOCAL_SUBJECT_FOLDER.joinpath(
self.session_info.SUBJECT_NAME,
self.session_info.SESSION_START_TIME[:10],
)
@@ -386,7 +384,7 @@ def sigint_handler(*args, **kwargs):
self.logger.critical("Graceful exit")
self.logger.info(f'Session {self.paths.SESSION_RAW_DATA_FOLDER}')
self.session_info.SESSION_END_TIME = datetime.datetime.now().isoformat()
if self.interactive:
if self.interactive and not self.wizard:
self.session_info.POOP_COUNT = graph.numinput(
"Poop count", f"{self.session_info.SUBJECT_NAME} droppings count:", nullable=True, askint=True)
self.save_task_parameters_to_json_file()
@@ -747,25 +745,6 @@ def start_mixin_rotary_encoder(self):


class ValveMixin:
def get_session_reward_amount(self: object) -> float:
# simply returns the reward amount if no adaptive rewared is used
if not self.task_params.ADAPTIVE_REWARD:
return self.task_params.REWARD_AMOUNT
# simply returns the reward amount if no adaptive rewared is used
if not self.task_params.ADAPTIVE_REWARD:
return self.task_params.REWARD_AMOUNT
else:
raise NotImplementedError
# todo: training choice world reward from session to session
# first session : AR_INIT_VALUE, return
# if total_water_session < (subject_weight / 25):
# minimum(last_reward + AR_STEP, AR_MAX_VALUE) 3 microliters AR_MAX_VALUE
# last ntrials strictly below 200:
# keep the same reward
# trial between 200 and above:
# maximum(last_reward - AR_STEP, AR_MIN_VALUE) 1.5 microliters AR_MIN_VALUE

# when implementing this make sure the test is solid

def init_mixin_valve(self: object):
self.valve = Bunch({})
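
For illustration of the path refactor above (an editorial sketch, not part of the diff): _init_paths now delegates to iblrig.path_helper.get_local_and_remote_paths, which is assumed to return a mapping exposing the 'local_subjects_folder' and 'remote_subjects_folder' keys used in the hunk, with sensible defaults when the settings entries are None. The lab name below is an example value.

from iblrig.path_helper import get_local_and_remote_paths

paths = get_local_and_remote_paths(
    local_path=None,       # falls back to the iblrig_settings.yaml value (or a default under the home folder)
    remote_path=None,
    lab='mainenlab',       # example lab name
)
print(paths['local_subjects_folder'])   # e.g. C:\iblrigv8_data\mainenlab\Subjects
print(paths['remote_subjects_folder'])  # None when no remote data path is configured
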
104 changes: 63 additions & 41 deletions iblrig/choiceworld.py
@@ -1,58 +1,80 @@
"""
Choice World Task related logic
Choice World Task related logic and functions that translate the task description in
Appendix 2 of the paper into code.
"""
from pathlib import Path

import numpy as np

from ibllib.io import session_params
from iblrig.raw_data_loaders import load_task_jsonable
from iblrig.path_helper import load_settings_yaml
import iblrig.raw_data_loaders
from iblrig.path_helper import iterate_previous_sessions
from iblutil.util import setup_logger

logger = setup_logger('iblrig', level='INFO')

CONTRASTS = 1 / np.array([-1, - 2, -4, -8, -16, np.inf, 16, 8, 4, 2, 1])
DEFAULT_TRAINING_PHASE = 0
DEFAULT_REWARD_VOLUME = 3


def _get_latest_training_phase_from_folder(folder_subjects):
n_retries = 3
c = 0
if folder_subjects is None:
return
for file_experiment in sorted(folder_subjects.rglob('_ibl_experiment.description*.yaml'), reverse=True):
session_path = file_experiment.parent
ad = session_params.read_params(file_experiment)
if '_iblrig_tasks_trainingChoiceWorld' not in ad['tasks'][0]:
continue
for ad_task in ad['tasks']:
adt = ad_task.get('_iblrig_tasks_trainingChoiceWorld', None)
if not adt:
return
trials_data, bpod_data = load_task_jsonable(session_path.joinpath(adt['collection'], '_iblrig_taskData.raw.jsonable'))
if 'training_phase' in trials_data:
training_phase = trials_data['training_phase'].values[-1]
return (training_phase, session_path.parts[-2])
c += 1
if c >= n_retries:
break
def compute_adaptive_reward_volume(subject_weight_g, reward_volume_ul, delivered_volume_ul, ntrials):
"""
If the mouse completed over 200 trials in the previous session, the reward volume is automatically
lowered by 0.1 microliters for the next session, but cannot go lower than a floor of 1.5 microliters.
If the mouse received less than its minimum required daily dose (~1 milliliter/25 grams of body weight)
during the previous session, the reward volume is increased by 0.1 microliters for the next session,
but cannot go above a ceiling of 3 microliters.
:param subject_weight_g: subject weight in grams
:param reward_volume_ul: the last reward volume setting in uL
:param delivered_volume_ul: the cumulative water delivered during the last session in uL
:param ntrials: the number of trials completed during the last session
:return: adaptive_reward_ul
"""
if subject_weight_g > (delivered_volume_ul / 1000 * 25):
reward_volume_ul += 0.1
elif ntrials > 200:
reward_volume_ul -= 0.1
return np.maximum(np.minimum(reward_volume_ul, 3), 1.5)


def get_training_phase(subject):
def get_subject_training_info(
subject_name, subject_weight_grams=None, task_name='_iblrig_tasks_trainingChoiceWorld',
default_reward=DEFAULT_REWARD_VOLUME, mode='silent', **kwargs):
"""
Goes throught the history of a subject and gets the latest training phase for this subject
:param subject:
:return:
Goes through the history of a subject and gets the latest
training phase and the adaptive reward volume for this subject
:param subject_name:
:param subject_weight_grams: current weight of the subject in grams, if not available, will use the previous session weight
:param default_reward: default reward volume in uL if no previous session is available
:param task_name: name of the protocol to look for in experiment description,
defaults to '_iblrig_tasks_trainingChoiceWorld'
:param mode: 'silent' or 'raise': if 'silent', returns default values when no previous session is found; if 'raise', raises ValueError
:param **kwargs: optional arguments to be passed to iblrig.path_helper.get_local_and_remote_paths
if not used, will use the arguments from iblrig/settings/iblrig_settings.yaml
:return: training_phase (int), adaptive_reward in uL (float between 1.5 and 3) and status (True if a previous session was found,
False if it was not and default values were returned)
"""
DEFAULT_PHASE = 0
iblrig_settings = load_settings_yaml()
local_subjects_path = Path(iblrig_settings['iblrig_local_data_path']).joinpath(iblrig_settings['ALYX_LAB'], 'Subjects')
local = _get_latest_training_phase_from_folder(local_subjects_path.joinpath(subject)) or (DEFAULT_PHASE, '0000-00-00')
remote = (DEFAULT_PHASE, '0000-00-00')
if iblrig_settings['iblrig_remote_data_path'] is not None:
remote_subjects_path = Path(iblrig_settings['iblrig_remote_data_path']).joinpath('Subjects')
remote = _get_latest_training_phase_from_folder(remote_subjects_path.joinpath(subject)) or (DEFAULT_PHASE, '0000-00-00')
if remote[1] > local[1]:
return remote[0]
session_info = iterate_previous_sessions(subject_name, task_name=task_name, n=1, **kwargs)
if len(session_info) == 0:
if mode == 'silent':
logger.warning("The training status could not be determined returning default values")
return DEFAULT_TRAINING_PHASE, default_reward, False
elif mode == 'raise':
raise ValueError("The training status could not be determined as no previous sessions were found")
else:
session_info = session_info[0]
trials_data, _ = iblrig.raw_data_loaders.load_task_jsonable(session_info.file_task_data)
previous_reward_volume = (session_info.task_settings.get('ADAPTIVE_REWARD_AMOUNT_UL') or
session_info.task_settings.get('REWARD_AMOUNT_UL'))
adaptive_reward = compute_adaptive_reward_volume(
subject_weight_g=subject_weight_grams or session_info.task_settings['SUBJECT_WEIGHT'],
reward_volume_ul=previous_reward_volume,
delivered_volume_ul=trials_data['reward_amount'].sum(),
ntrials=trials_data.shape[0])
if 'training_phase' in trials_data:
training_phase = trials_data['training_phase'].values[-1]
else:
return local[0]
training_phase = DEFAULT_TRAINING_PHASE
return training_phase, adaptive_reward, True


def training_contrasts_probabilities(phase=1):
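
Worked example of the adaptive reward logic introduced above (an editorial sketch, not part of the diff; 'SWC_043' and the weights are example values):

from iblrig.choiceworld import compute_adaptive_reward_volume, get_subject_training_info

# A 25 g mouse needs roughly 1000 uL of water per day; it only received 800 uL over 180 trials,
# so the reward volume for the next session steps up by 0.1 uL (capped at 3 uL).
compute_adaptive_reward_volume(subject_weight_g=25, reward_volume_ul=2.0,
                               delivered_volume_ul=800, ntrials=180)    # -> 2.1

# The same mouse received 1200 uL and completed more than 200 trials,
# so the reward volume steps down by 0.1 uL (floored at 1.5 uL).
compute_adaptive_reward_volume(subject_weight_g=25, reward_volume_ul=2.0,
                               delivered_volume_ul=1200, ntrials=250)   # -> 1.9

# Session-level helper: returns the inferred training phase, the adaptive reward and a status flag.
training_phase, adaptive_reward, found = get_subject_training_info('SWC_043', subject_weight_grams=24.5)
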
