8.12.0 #535
Merged 4 commits on Oct 27, 2023

10 changes: 10 additions & 0 deletions CHANGELOG.md
@@ -1,6 +1,12 @@
Changelog
---------

8.12.0
------
* add a trainingPhaseChoiceWorld task for running sessions at a fixed training level
* bugfix: the copy script prompt now accepts both upper-case and lower-case Y to proceed
* bugfix: the update check used incorrect subprocess calls

-------------------------------

8.11.5
@@ -36,6 +42,8 @@ Changelog
* detect duds (less than 42 trials) and offer deletion
* various small bugfixes

-------------------------------

8.10.2
------
* hot-fix parsing of path args in transfer_data
@@ -54,6 +62,8 @@ Changelog
* adaptive reward from previous sessions in TrainingChoiceWorld
* updater: fetch remote changelog to advertise new features

-------------------------------

8.9.4
-----
* correction for version regex
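The headline item of 8.12.0 is the new trainingPhaseChoiceWorld task, which keeps the training phase fixed for the whole session rather than letting it advance automatically (its check_training_phase() is overridden to a no-op; see the task.py diff at the bottom of this PR). A minimal instantiation sketch — the subject name and keyword arguments are hypothetical placeholders, not values from this PR:

```python
# Hypothetical sketch: Session forwards training_level to the parent class as
# training_phase and records it in task_params, so the contrast set stays
# fixed for the entire session.
from iblrig_tasks._iblrig_tasks_trainingPhaseChoiceWorld.task import Session

session = Session(subject='example_subject',  # placeholder subject (assumption)
                  training_level=3,           # fixed phase -> fixed contrast set
                  debias=True)                # debias trials apply to levels 0-4 only
assert session.training_phase == 3            # mirrors the check in the new unit test
```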
2 changes: 1 addition & 1 deletion iblrig/__init__.py
@@ -4,7 +4,7 @@
# 3) Check CI and eventually wet lab test
# 4) Pull request to iblrigv8
# 5) git tag the release in accordance to the version number below (after merge!)
__version__ = '8.11.5'
__version__ = '8.12.0'

# The following method call will try to get post-release information (i.e. the number of commits since the last tagged
# release corresponding to the one above), plus information about the state of the local repository (dirty/broken)
2 changes: 1 addition & 1 deletion iblrig/commands.py
@@ -141,7 +141,7 @@ def _get_copiers(copier: Type[SessionCopier], local_folder: Path, remote_folder:
print('Could not find any sessions to copy to the local server.')
elif interactive:
_print_status(copiers, 'Session states prior to transfer operation:')
if input('\nDo you want to continue? [Y/n] ') not in ('y', ''):
if input('\nDo you want to continue? [Y/n] ').lower() not in ('y', ''):
copiers = list()
return copiers
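The one-character change above makes the transfer prompt case-insensitive. A standalone sketch of the accept logic, using a plain function instead of a live input() call:

```python
def accepts(answer: str) -> bool:
    # Mirrors the fixed check: pressing Enter (empty string) or any-case 'y' proceeds.
    return answer.lower() in ('y', '')

assert accepts('Y')       # rejected before the fix, accepted now
assert accepts('y')
assert accepts('')        # bare Enter keeps the [Y/n] default
assert not accepts('n')
```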

108 changes: 90 additions & 18 deletions iblrig/test/tasks/test_training_choice_world.py
@@ -1,38 +1,110 @@
import numpy as np
import pandas as pd

from iblrig_tasks._iblrig_tasks_trainingChoiceWorld.task import Session as TrainingChoiceWorldSession
from iblrig_tasks._iblrig_tasks_trainingPhaseChoiceWorld.task import Session as TrainingPhaseChoiceWorldSession
from iblrig.test.base import TASK_KWARGS, BaseTestCases
from iblrig.test.tasks.test_biased_choice_world_family import get_fixtures


class TestInstantiationTraining(BaseTestCases.CommonTestInstantiateTask):
class TestTrainingPhaseChoiceWorld(BaseTestCases.CommonTestInstantiateTask):

def setUp(self) -> None:
self.task = TrainingChoiceWorldSession(**TASK_KWARGS)

def test_task(self):
"""
This test loops over the training phases described in the mouse training protocol and runs a full
session for each training-phase parameter:
https://docs.google.com/document/d/1RA6wgbWfxD2kGlpNxt0n3HVcW4TEIx8e-YO7k_W1pHs/edit
It then checks for:
- the contrast set
- the presence or absence of debias trials
- the relative frequency of each contrast
:return:
"""
trial_fixtures = get_fixtures()
ADAPTIVE_REWARD = 1.9
nt = 800
for training_phase in np.arange(6):
task = TrainingChoiceWorldSession(**TASK_KWARGS, adaptive_reward=ADAPTIVE_REWARD)
task.training_phase = training_phase
task.create_session()
for i in np.arange(nt):
task.next_trial()
# pc = task.psychometric_curve()
trial_type = np.random.choice(['correct', 'error', 'no_go'], p=[.9, .05, .05])
task.trial_completed(trial_fixtures[trial_type])
if trial_type == 'correct':
self.assertTrue(task.trials_table['trial_correct'][task.trial_num])
self.assertEqual(task.trials_table['reward_amount'][task.trial_num], ADAPTIVE_REWARD)
with self.subTest(training_phase=training_phase):
np.random.seed(12354)
task = TrainingPhaseChoiceWorldSession(
**TASK_KWARGS, adaptive_reward=ADAPTIVE_REWARD, training_level=training_phase)
assert task.training_phase == training_phase
task.create_session()
for i in np.arange(nt):
task.next_trial()
# pc = task.psychometric_curve()
trial_type = np.random.choice(['correct', 'error', 'no_go'], p=[.9, .05, .05])
task.trial_completed(trial_fixtures[trial_type])
if trial_type == 'correct':
self.assertTrue(task.trials_table['trial_correct'][task.trial_num])
self.assertEqual(task.trials_table['reward_amount'][task.trial_num], ADAPTIVE_REWARD)
else:
assert not task.trials_table['trial_correct'][task.trial_num]
assert not np.isnan(task.reward_time)
trials_table = task.trials_table[:task.trial_num].copy()
contrasts = trials_table.groupby(['contrast']).agg(
count=pd.NamedAgg(column='contrast', aggfunc='count'),
).reset_index()
np.testing.assert_equal(trials_table['stim_probability_left'].values, 0.5)
np.testing.assert_equal(np.unique(trials_table['reward_amount'].values), np.array([0, ADAPTIVE_REWARD]))
np.testing.assert_equal(trials_table['training_phase'].values, training_phase)
debias = True
probas = 1
match training_phase:
case 5:
contrast_set = np.array([0, 0.0625, 0.125, 0.25, 1.])
probas = np.array([1, 2, 2, 2, 2])
debias = False
case 4:
contrast_set = np.array([0, 0.0625, 0.125, 0.25, 0.5, 1.])
probas = np.array([1, 2, 2, 2, 2, 2])
case 3:
contrast_set = np.array([0.0625, 0.125, 0.25, 0.5, 1.])
case 2:
contrast_set = np.array([0.125, 0.25, 0.5, 1.])
case 1:
contrast_set = np.array([0.25, 0.5, 1.])
case 0:
contrast_set = np.array([0.5, 1.])

np.testing.assert_equal(contrasts['contrast'].values, contrast_set)
normalized_counts = np.abs((nt / contrast_set.size - contrasts['count'].values))
normalized_counts = normalized_counts * probas / np.sum(probas)
normalized_counts = normalized_counts / (nt / contrast_set.size)
np.testing.assert_array_less(normalized_counts, 0.33)
if debias:
assert np.sum(trials_table['debias_trial']) > 20
else:
assert not task.trials_table['trial_correct'][task.trial_num]
if i == 245:
task.show_trial_log()
assert not np.isnan(task.reward_time)
if training_phase == 5:
assert np.sum(task.trials_table['contrast'] == 0.5) == 0
assert np.sum(trials_table['debias_trial']) == 0


class TestInstantiationTraining(BaseTestCases.CommonTestInstantiateTask):

def setUp(self) -> None:
self.task = TrainingChoiceWorldSession(**TASK_KWARGS)

def test_task(self):
trial_fixtures = get_fixtures()
ADAPTIVE_REWARD = 1.9
nt = 800
task = TrainingChoiceWorldSession(**TASK_KWARGS, adaptive_reward=ADAPTIVE_REWARD)
task.create_session()
for i in np.arange(nt):
task.next_trial()
# pc = task.psychometric_curve()
trial_type = np.random.choice(['correct', 'error', 'no_go'], p=[.9, .05, .05])
task.trial_completed(trial_fixtures[trial_type])
if trial_type == 'correct':
self.assertTrue(task.trials_table['trial_correct'][task.trial_num])
self.assertEqual(task.trials_table['reward_amount'][task.trial_num], ADAPTIVE_REWARD)
else:
assert not task.trials_table['trial_correct'][task.trial_num]
if i == 245:
task.show_trial_log()
assert not np.isnan(task.reward_time)

def test_acquisition_description(self):
ad = self.task.experiment_description
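For reference, the contrast sets and relative draw weights that the new test asserts for each training phase can be summarized as a lookup table. This is a sketch derived from the match statement above, not code from the PR (weights are uniform where None):

```python
import numpy as np

# Contrast sets (and draw weights) per training phase, as asserted by
# TestTrainingPhaseChoiceWorld; phase 5 drops the 0.5 contrast and disables debias trials.
CONTRASTS_BY_PHASE = {
    0: (np.array([0.5, 1.0]), None),
    1: (np.array([0.25, 0.5, 1.0]), None),
    2: (np.array([0.125, 0.25, 0.5, 1.0]), None),
    3: (np.array([0.0625, 0.125, 0.25, 0.5, 1.0]), None),
    4: (np.array([0.0, 0.0625, 0.125, 0.25, 0.5, 1.0]), np.array([1, 2, 2, 2, 2, 2])),
    5: (np.array([0.0, 0.0625, 0.125, 0.25, 1.0]), np.array([1, 2, 2, 2, 2])),
}
```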
33 changes: 18 additions & 15 deletions iblrig/test/test_gui.py
@@ -22,18 +22,21 @@ def test_get_task_extra_kwargs(self):
:return:
"""
for task_name in self.wizard.all_tasks:
parser = self.wizard.get_task_extra_parser(task_name)
extra_args = [{act.option_strings[0]: act.type} for act in parser._actions]
match task_name:
case '_iblrig_tasks_advancedChoiceWorld':
expect = 6
case '_iblrig_tasks_trainingChoiceWorld':
expect = 4
case '_iblrig_tasks_ephysChoiceWorld':
expect = 2
case '_iblrig_tasks_spontaneous' | 'plau_oddBallAudio':
expect = 0
case _:
print(task_name)
expect = 1
self.assertEqual(expect, len(extra_args))
with self.subTest(task_name=task_name):
parser = self.wizard.get_task_extra_parser(task_name)
extra_args = [{act.option_strings[0]: act.type} for act in parser._actions]
match task_name:
case '_iblrig_tasks_advancedChoiceWorld':
expect = 6
case '_iblrig_tasks_trainingPhaseChoiceWorld':
expect = 3
case '_iblrig_tasks_trainingChoiceWorld':
expect = 4
case '_iblrig_tasks_ephysChoiceWorld':
expect = 2
case '_iblrig_tasks_spontaneous' | 'plau_oddBallAudio':
expect = 0
case _:
print(task_name)
expect = 1
self.assertEqual(expect, len(extra_args))
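The test now wraps each task's assertions in self.subTest(), so a mismatch for one task is reported individually instead of aborting the loop. A minimal standalone sketch of the pattern, with hypothetical fixture data:

```python
import unittest

class ExampleSubTest(unittest.TestCase):
    def test_per_task(self):
        # Hypothetical fixture: expected extra-argument counts keyed by task name.
        expected = {'taskA': 2, 'taskB': 3}
        parsed = {'taskA': ['--a', '--b'], 'taskB': ['--a', '--b', '--c']}
        for name, count in expected.items():
            with self.subTest(task_name=name):
                # Failures are reported per task_name rather than stopping at the first one.
                self.assertEqual(count, len(parsed[name]))

if __name__ == '__main__':
    unittest.main()
```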
4 changes: 2 additions & 2 deletions iblrig/version_management.py
@@ -328,6 +328,6 @@ def upgrade() -> int:
check_call(["git", "reset", "--hard"], cwd=BASE_DIR)

check_call(["git", "pull", "--tags"], cwd=BASE_DIR)
check_call([sys.executable, "-m", "pip", "install", "-U", "pip"])
check_call([sys.executable, "-m", "pip", "install", "-U", "-e", "."])
check_call(["pip", "install", "-U", "pip"])
check_call(["pip", "install", "-U", "-e", "."])
return 0
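Both the old and new variants go through subprocess.check_call, which raises CalledProcessError on a non-zero exit code, so a failed pip invocation aborts the upgrade. A small illustrative wrapper (not part of the PR) showing how a caller could surface such a failure:

```python
import subprocess

def run_step(cmd: list[str]) -> None:
    # check_call raises CalledProcessError on a non-zero exit code, so a failed
    # step stops the upgrade instead of silently continuing.
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError as exc:
        raise RuntimeError(f'upgrade step failed: {" ".join(cmd)}') from exc

# e.g. run_step(['pip', 'install', '-U', 'pip'])
```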
25 changes: 22 additions & 3 deletions iblrig_tasks/_iblrig_tasks_trainingPhaseChoiceWorld/task.py
@@ -1,18 +1,37 @@
from pathlib import Path
import yaml
from iblrig.base_choice_world import TrainingChoiceWorldSession
import iblrig.misc

# read defaults from task_parameters.yaml
with open(Path(__file__).parent.joinpath('task_parameters.yaml')) as f:
DEFAULTS = yaml.safe_load(f)


class Session(TrainingChoiceWorldSession):
protocol_name = "_iblrig_tasks_trainingPhaseChoiceWorld"
extractor_tasks = ['TrialRegisterRaw', 'ChoiceWorldTrials', 'TrainingStatus']

def __init__(self, *args, **kwargs):
super(Session, self).__init__(*args, **kwargs)
self.training_phase = self.task_params["TRAINING_PHASE"]
def __init__(self, *args, training_level=DEFAULTS["TRAINING_PHASE"], debias=DEFAULTS['DEBIAS'], **kwargs):
super(Session, self).__init__(*args, training_phase=training_level, **kwargs)
self.task_params["TRAINING_PHASE"] = training_level
self.task_params["DEBIAS"] = debias

def check_training_phase(self):
pass

@staticmethod
def extra_parser():
""" :return: argparse.parser() """
parser = super(Session, Session).extra_parser()
parser.add_argument('--training_level', option_strings=['--training_level'],
dest='training_level', default=DEFAULTS["TRAINING_PHASE"], type=int,
help='defines the set of contrasts presented to the subject')
parser.add_argument('--debias', option_strings=['--debias'],
dest='debias', default=DEFAULTS['DEBIAS'], type=bool,
help='uses the debiasing protocol (only applies to levels 0-4)')
return parser


if __name__ == "__main__": # pragma: no cover
kwargs = iblrig.misc.get_task_arguments(parents=[Session.extra_parser()])
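A standalone sketch of how the two new command-line options parse, using a throwaway parser that mirrors the arguments added in extra_parser() above (the defaults shown are placeholders; the real ones come from task_parameters.yaml):

```python
import argparse

# Illustrative parser only: the real one is returned by Session.extra_parser()
# and also inherits the parent task's options.
parser = argparse.ArgumentParser()
parser.add_argument('--training_level', dest='training_level', default=0, type=int,
                    help='defines the set of contrasts presented to the subject')
parser.add_argument('--debias', dest='debias', default=True, type=bool,
                    help='uses the debiasing protocol (only applies to levels 0-4)')

args = parser.parse_args(['--training_level', '3'])
assert args.training_level == 3 and args.debias is True
```

Worth noting: argparse's type=bool converts any non-empty string to True, so passing --debias False on the command line still yields True; disabling debias reliably means passing debias=False programmatically or changing the YAML default.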