Commit

[TEST] add tests for X19V

bclenet committed Feb 1, 2024
1 parent 748f253 commit b388419
Showing 4 changed files with 190 additions and 41 deletions.
2 changes: 1 addition & 1 deletion narps_open/pipelines/__init__.py
@@ -74,7 +74,7 @@
'UK24': None,
'V55J': None,
'VG39': None,
'X19V': None,
'X19V': 'PipelineTeamX19V',
'X1Y5': None,
'X1Z4': None,
'XU70': None
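For context, a minimal sketch (not part of this commit) of how a registry entry like the one added above can be resolved into a pipeline instance. The dictionary name implemented_pipelines and the module naming scheme narps_open.pipelines.team_<id> are assumptions based on the file being patched, not confirmed by the diff.

from importlib import import_module

def get_pipeline(team_id: str):
    """ Return an instance of the pipeline class registered for a NARPS team. """
    from narps_open.pipelines import implemented_pipelines  # assumed registry name
    class_name = implemented_pipelines[team_id]
    if class_name is None:
        raise NotImplementedError(f'Team {team_id} has no implemented pipeline')
    # assumed convention: one module per team, named team_<id>
    module = import_module(f'narps_open.pipelines.team_{team_id}')
    return getattr(module, class_name)()

# With the change above, get_pipeline('X19V') would return a PipelineTeamX19V instance.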
88 changes: 48 additions & 40 deletions narps_open/pipelines/team_X19V.py
@@ -99,18 +99,18 @@ def get_subject_information(event_file):
regressors = None)
]

def get_parameters_file(filepath, subject_id, run_id, working_dir):
def get_confounds_file(filepath, subject_id, run_id, working_dir):
"""
Create a tsv file with only desired parameters per subject per run.
Create a tsv file with only desired confounds per subject per run.
Parameters :
- filepath : path to the subject parameters file (i.e. one per run)
- filepath : path to the subject confounds file (i.e. one per run)
- subject_id : subject for whom the 1st level analysis is made
- run_id: run for which the 1st level analysis is made
- working_dir: str, name of the directory for intermediate results
Return :
- parameters_file : paths to new files containing only desired parameters.
- confounds_file : paths to new files containing only desired confounds.
"""
from os import makedirs
from os.path import join
@@ -122,18 +122,18 @@ def get_parameters_file(filepath, subject_id, run_id, working_dir):
temp_list = array([
data_frame['X'], data_frame['Y'], data_frame['Z'],
data_frame['RotX'], data_frame['RotY'], data_frame['RotZ']])
retained_parameters = DataFrame(transpose(temp_list))
retained_confounds = DataFrame(transpose(temp_list))

parameters_file = join(working_dir, 'parameters_file',
f'parameters_file_sub-{subject_id}_run-{run_id}.tsv')
confounds_file = join(working_dir, 'confounds_files',
f'confounds_file_sub-{subject_id}_run-{run_id}.tsv')

makedirs(join(working_dir, 'parameters_file'), exist_ok = True)
makedirs(join(working_dir, 'confounds_files'), exist_ok = True)

with open(parameters_file, 'w') as writer:
writer.write(retained_parameters.to_csv(
with open(confounds_file, 'w') as writer:
writer.write(retained_confounds.to_csv(
sep = '\t', index = False, header = False, na_rep = '0.0'))

return parameters_file
return confounds_file
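
As a usage sketch (paths are illustrative assumptions; the new unit test below exercises the same call with shared test data), the renamed helper keeps only the six motion parameters X, Y, Z, RotX, RotY and RotZ and writes them tab-separated, without header, under the working directory:

from narps_open.pipelines.team_X19V import PipelineTeamX19V

new_file = PipelineTeamX19V.get_confounds_file(
    filepath = '/data/sub-001_task-MGT_run-01_bold_confounds.tsv',  # assumed input path
    subject_id = '001',
    run_id = '01',
    working_dir = '/tmp/test_X19V')                                 # assumed working dir
# new_file -> /tmp/test_X19V/confounds_files/confounds_file_sub-001_run-01.tsv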

def get_run_level_analysis(self):
"""
@@ -190,13 +190,13 @@ def get_run_level_analysis(self):
specify_model.inputs.input_units = 'secs'
specify_model.inputs.time_repetition = TaskInformation()['RepetitionTime']

# Function Node get_parameters_file - Get files with movement parameters
parameters = Node(Function(
function = self.get_parameters_file,
# Function Node get_confounds_file - Get files with movement confounds
confounds = Node(Function(
function = self.get_confounds_file,
input_names = ['filepath', 'subject_id', 'run_id', 'working_dir'],
output_names = ['parameters_file']),
name = 'parameters')
parameters.inputs.working_dir = self.directories.working_dir
output_names = ['confounds_file']),
name = 'confounds')
confounds.inputs.working_dir = self.directories.working_dir

# Level1Design Node - Generate files for run level computation
model_design = Node(Level1Design(), name = 'model_design')
@@ -221,13 +221,13 @@
('subject_id', 'subject_id'),
('run_id', 'run_id')]),
(select_files, subject_information, [('event', 'event_file')]),
(select_files, parameters, [('param', 'filepath')]),
(information_source, parameters, [
(select_files, confounds, [('param', 'filepath')]),
(information_source, confounds, [
('subject_id', 'subject_id'),
('run_id', 'run_id')]),
(select_files, skull_stripping_func, [('func', 'in_file')]),
(skull_stripping_func, smoothing_func, [('out_file', 'in_file')]),
(parameters, specify_model, [('parameters_file', 'realignment_parameters')]),
(confounds, specify_model, [('confounds_file', 'realignment_parameters')]),
(smoothing_func, specify_model, [('out_file', 'functional_runs')]),
(subject_information, specify_model, [('subject_info', 'subject_info')]),
(specify_model, model_design, [('session_info', 'session_info')]),
@@ -288,21 +288,18 @@ def get_run_level_outputs(self):
'run_id' : self.run_list,
'subject_id' : self.subject_list,
'contrast_id' : self.contrast_list,
'file' : [
join('results', 'cope{contrast_id}.nii.gz'),
join('results', 'tstat{contrast_id}.nii.gz'),
join('results', 'varcope{contrast_id}.nii.gz'),
join('results', 'zstat{contrast_id}.nii.gz'),
]
}
parameter_sets = product(*parameters.values())
template = join(
self.directories.output_dir,
'run_level_analysis', '_run_id_{run_id}_subject_id_{subject_id}','{file}'
)

output_dir = join(self.directories.output_dir,
'run_level_analysis', '_run_id_{run_id}_subject_id_{subject_id}')
templates = [
join(output_dir, 'results', 'cope{contrast_id}.nii.gz'),
join(output_dir, 'results', 'tstat{contrast_id}.nii.gz'),
join(output_dir, 'results', 'varcope{contrast_id}.nii.gz'),
join(output_dir, 'results', 'zstat{contrast_id}.nii.gz')
]
return_list += [template.format(**dict(zip(parameters.keys(), parameter_values)))\
for parameter_values in parameter_sets]
for parameter_values in parameter_sets for template in templates]

return return_list
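
To make the refactored expansion above concrete, a small self-contained sketch with assumed parameter values; note the templates now carry {contrast_id} directly, since a value substituted for a {file} placeholder would not itself be re-expanded by str.format (see the note after get_subject_level_outputs below):

from itertools import product
from os.path import join

parameters = {
    'run_id' : ['01'],
    'subject_id' : ['001'],
    'contrast_id' : ['1', '2'],
}
parameter_sets = product(*parameters.values())
output_dir = join('output', 'run_level_analysis', '_run_id_{run_id}_subject_id_{subject_id}')
templates = [
    join(output_dir, 'results', 'cope{contrast_id}.nii.gz'),
    join(output_dir, 'results', 'zstat{contrast_id}.nii.gz')
]
paths = [template.format(**dict(zip(parameters.keys(), parameter_values)))
    for parameter_values in parameter_sets for template in templates]
# 1 run x 1 subject x 2 contrasts x 2 templates = 4 fully expanded paths, e.g.
# output/run_level_analysis/_run_id_01_subject_id_001/results/cope1.nii.gz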

@@ -403,19 +400,31 @@ def get_subject_level_outputs(self):
parameters = {
'contrast_id' : self.contrast_list,
'subject_id' : self.subject_list,
'file' : ['cope1.nii.gz', 'tstat1.nii.gz', 'varcope1.nii.gz', 'zstat1.nii.gz',
'sub-{subject_id}_task-MGT_run-01_bold_space-MNI152NLin2009cAsym_preproc_brain_mask_maths.nii.gz'
]
'file' : ['cope1.nii.gz', 'tstat1.nii.gz', 'varcope1.nii.gz', 'zstat1.nii.gz']
}
parameter_sets = product(*parameters.values())
template = join(
self.directories.output_dir,
'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_{subject_id}','{file}'
)
return_list = [template.format(**dict(zip(parameters.keys(), parameter_values)))\
for parameter_values in parameter_sets]

return [template.format(**dict(zip(parameters.keys(), parameter_values)))\
parameters = {
'contrast_id' : self.contrast_list,
'subject_id' : self.subject_list,
}
parameter_sets = product(*parameters.values())
template = join(
self.directories.output_dir,
'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_{subject_id}',
'sub-{subject_id}_task-MGT_run-01_bold_space-MNI152NLin2009cAsym_preproc_brain_mask_maths.nii.gz'
)
return_list += [template.format(**dict(zip(parameters.keys(), parameter_values)))\
for parameter_values in parameter_sets]

return return_list
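
A likely reason the brain mask path is pulled out of the 'file' list here: str.format performs a single substitution pass, so a placeholder nested inside a substituted value is left untouched. A minimal illustration with assumed values:

template = '_contrast_id_{contrast_id}_subject_id_{subject_id}/{file}'
template.format(contrast_id = '1', subject_id = '001',
    file = 'sub-{subject_id}_brain_mask.nii.gz')
# -> '_contrast_id_1_subject_id_001/sub-{subject_id}_brain_mask.nii.gz' (not expanded)

'_contrast_id_{contrast_id}_subject_id_{subject_id}/sub-{subject_id}_brain_mask.nii.gz'.format(
    contrast_id = '1', subject_id = '001')
# -> '_contrast_id_1_subject_id_001/sub-001_brain_mask.nii.gz' (fully expanded)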

def get_one_sample_t_test_regressors(subject_list: list) -> dict:
"""
Create dictionary of regressors for one sample t-test group analysis.
@@ -730,13 +739,12 @@ def get_group_level_outputs(self):
'tstat2.nii.gz',
'zstat1.nii.gz',
'zstat2.nii.gz'
],
'nb_subjects' : [str(len(self.subject_list))]
]
}
parameter_sets = product(*parameters.values())
template = join(
self.directories.output_dir,
'group_level_analysis_{method}_nsub_{nb_subjects}',
'group_level_analysis_{method}_nsub_'+f'{len(self.subject_list)}',
'_contrast_id_{contrast_id}',
'{file}'
)
@@ -754,7 +762,7 @@
return_list += [join(
self.directories.output_dir,
f'group_level_analysis_groupComp_nsub_{len(self.subject_list)}',
'_contrast_id_2', f'{file}') for file in files]
'_contrast_id_2', file) for file in files] # TODO contrast ID 2 only ????

return return_list

138 changes: 138 additions & 0 deletions tests/pipelines/test_team_X19V.py
@@ -0,0 +1,138 @@
#!/usr/bin/python
# coding: utf-8

""" Tests of the 'narps_open.pipelines.team_X19V' module.
Launch this test with PyTest
Usage:
======
pytest -q test_team_X19V.py
pytest -q test_team_X19V.py -k <selected_test>
"""
from os import mkdir
from os.path import join, exists
from shutil import rmtree
from filecmp import cmp

from pytest import helpers, mark, fixture
from numpy import isclose
from nipype import Workflow
from nipype.interfaces.base import Bunch

from narps_open.utils.configuration import Configuration
from narps_open.pipelines.team_X19V import PipelineTeamX19V

TEMPORARY_DIR = join(Configuration()['directories']['test_runs'], 'test_X19V')

@fixture
def remove_test_dir():
""" A fixture to remove temporary directory created by tests """

rmtree(TEMPORARY_DIR, ignore_errors = True)
mkdir(TEMPORARY_DIR)
yield # test runs here
#rmtree(TEMPORARY_DIR, ignore_errors = True)

def compare_float_2d_arrays(array_1, array_2):
""" Assert array_1 and array_2 are close enough """

assert len(array_1) == len(array_2)
for reference_array, test_array in zip(array_1, array_2):
assert len(reference_array) == len(test_array)
assert isclose(reference_array, test_array).all()

class TestPipelinesTeamX19V:
""" A class that contains all the unit tests for the PipelineTeamX19V class."""

@staticmethod
@mark.unit_test
def test_create():
""" Test the creation of a PipelineTeamX19V object """

pipeline = PipelineTeamX19V()

# 1 - check the parameters
assert pipeline.fwhm == 5.0
assert pipeline.team_id == 'X19V'

# 2 - check workflows
assert pipeline.get_preprocessing() is None
assert pipeline.get_run_level_analysis() is None
assert isinstance(pipeline.get_subject_level_analysis(), Workflow)
group_level = pipeline.get_group_level_analysis()
assert len(group_level) == 3
for sub_workflow in group_level:
assert isinstance(sub_workflow, Workflow)

@staticmethod
@mark.unit_test
def test_outputs():
""" Test the expected outputs of a PipelineTeamX19V object """

pipeline = PipelineTeamX19V()

# 1 - 1 subject outputs
pipeline.subject_list = ['001']
helpers.test_pipeline_outputs(pipeline, [0, 4*1 + 4*4*4*1, 4*4*1 + 4*1, 0, 18])

# 2 - 4 subjects outputs
pipeline.subject_list = ['001', '002', '003', '004']
helpers.test_pipeline_outputs(pipeline, [0, 4*4 + 4*4*4*4, 4*4 + 4*4, 0, 18])

@staticmethod
@mark.unit_test
def test_subject_information():
""" Test the get_subject_information method """

# Get test files
test_file = join(Configuration()['directories']['test_data'], 'pipelines', 'events.tsv')

# Prepare several scenarios
info_ok = PipelineTeamX19V.get_subject_information(test_file)

# Compare bunches to expected
bunch = info_ok[0]
assert isinstance(bunch, Bunch)
assert bunch.conditions == ['trial', 'gain', 'loss']
compare_float_2d_arrays(bunch.onsets, [
[4.071, 11.834, 19.535, 27.535, 36.435],
[4.071, 11.834, 19.535, 27.535, 36.435],
[4.071, 11.834, 19.535, 27.535, 36.435]])
compare_float_2d_arrays(bunch.durations, [
[4.0, 4.0, 4.0, 4.0, 4.0],
[4.0, 4.0, 4.0, 4.0, 4.0],
[4.0, 4.0, 4.0, 4.0, 4.0]])
compare_float_2d_arrays(bunch.amplitudes, [
[1.0, 1.0, 1.0, 1.0, 1.0],
[-8.4, 11.6, 15.6, -12.4, -6.4],
[-8.2, -0.2, 4.8, 0.8, 2.8]])
assert bunch.regressor_names == None
assert bunch.regressors == None

@staticmethod
@mark.unit_test
def test_confounds_file(remove_test_dir):
""" Test the get_confounds_file method """

confounds_file = join(
Configuration()['directories']['test_data'], 'pipelines', 'confounds.tsv')
reference_file = join(
Configuration()['directories']['test_data'], 'pipelines', 'team_X19V', 'confounds.tsv')

# Get new confounds file
PipelineTeamX19V.get_confounds_file(confounds_file, 'sid', 'rid', TEMPORARY_DIR)

# Check confounds file was created
created_confounds_file = join(
TEMPORARY_DIR, 'confounds_files', 'confounds_file_sub-sid_run-rid.tsv')
assert exists(created_confounds_file)

# Check contents
assert cmp(reference_file, created_confounds_file)

@staticmethod
@mark.pipeline_test
def test_execution():
""" Test the execution of a PipelineTeamX19V and compare results """
helpers.test_pipeline_evaluation('X19V')
3 changes: 3 additions & 0 deletions tests/test_data/pipelines/team_X19V/confounds.tsv
@@ -0,0 +1,3 @@
0.0 0.0 0.0 0.0 -0.0 0.0
-0.00996895 -0.0313444 -3.00931e-06 0.00132687 -0.000384193 -0.00016819
-2.56954e-05 -0.00923735 0.0549667 0.000997278 -0.00019745 -0.000398988
