Skip to content

Commit

Permalink
[TEST] Update tests [REFAC] PEP 8 compliance and bug fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
bclenet committed Oct 4, 2023
1 parent 839895f commit ec0f8f2
Show file tree
Hide file tree
Showing 3 changed files with 221 additions and 68 deletions.
2 changes: 1 addition & 1 deletion narps_open/pipelines/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@
'6VV2': None,
'80GC': None,
'94GU': None,
'98BT': None,
'98BT': 'PipelineTeam98BT',
'9Q6R': None,
'9T8E': None,
'9U7M': None,
Expand Down
215 changes: 148 additions & 67 deletions narps_open/pipelines/team_98BT.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,9 @@
""" Write the work of NARPS team 98BT using Nipype """

from os.path import join
from json import load
from itertools import product

from nipype import Workflow, Node, MapNode
from nipype import Workflow, Node, MapNode, JoinNode
from nipype.interfaces.utility import IdentityInterface, Function, Rename
from nipype.interfaces.io import SelectFiles, DataSink
from nipype.algorithms.misc import Gunzip
Expand All @@ -18,8 +18,10 @@
)
from nipype.interfaces.fsl import ExtractROI
from nipype.algorithms.modelgen import SpecifySPMModel
from niflow.nipype1.workflows.fmri.spm import create_DARTEL_template

from narps_open.pipelines import Pipeline
from narps_open.data.task import Pipeline

class PipelineTeam98BT(Pipeline):
""" A class that defines the pipeline of team 98BT. """
Expand All @@ -28,19 +30,16 @@ def __init__(self):
super().__init__()
self.fwhm = 8.0
self.team_id = '98BT'
self.contrast_list = []

# Get Information from the task
with open(join(self.directories.dataset_dir, 'task-MGT_bold.json'), 'rt') as file:
task_info = load(file)
self.slice_timing = task_info['SliceTiming']
self.number_of_slices = len(self.slice_timing)
self.acquisition_time = self.tr / self.number_of_slices
self.effective_echo_spacing = task_info['EffectiveEchoSpacing']
self.total_readout_time = self.number_of_slices * self.effective_echo_spacing
self.contrast_list = ['0001', '0002', '0003', '0004']

def get_dartel_template_sub_workflow(self):
"""
Create a dartel workflow, as first part of the preprocessing.
DARTEL allows to create a study-specific template in 3D volume space.
This study template can then be used for normalizating each subject’s
scans to the MNI space.
Returns:
- dartel : nipype.WorkFlow
"""
Expand Down Expand Up @@ -83,7 +82,7 @@ def get_dartel_input(structural_files):
rename_dartel.inputs.subject_id = self.subject_list
rename_dartel.inputs.keep_ext = True

dartel_workflow = spm_wf.create_DARTEL_template(name='dartel_workflow')
dartel_workflow = create_DARTEL_template(name = 'dartel_workflow')
dartel_workflow.inputs.inputspec.template_prefix = 'template'

# DataSink Node - store the wanted results in the wanted repository
Expand Down Expand Up @@ -256,7 +255,7 @@ def get_preprocessing_sub_workflow(self):
# Function node remove_temporary_files - remove temporary files
remove_temporary_files = Node(Function(
function = self.remove_temporary_files,
input_names = ['_', 'subject_id', 'run_id', 'result_dir', 'working_dir'],
input_names = ['_', 'subject_id', 'run_id', 'working_dir'],
output_names = []),
name = 'remove_temporary_files')
remove_temporary_files.inputs.working_dir = self.directories.working_dir
Expand Down Expand Up @@ -289,10 +288,10 @@ def get_preprocessing_sub_workflow(self):
(gunzip_anat, segmentation, [('out_file', 'channel_files')]),
(gunzip_func, slice_timing, [('out_file', 'in_files')]),
(slice_timing, motion_correction, [('timecorrected_files', 'in_files')]),
(motion_correction, remove_temporary_files, [('realigned_unwarped_files', 'files')]),
(remove_temporary_files, coregistration, [('files', 'apply_to_files')]),
(motion_correction, remove_temporary_files, [('realigned_unwarped_files', '_')]),
(motion_correction, coregistration, [('realigned_unwarped_files', 'apply_to_files')]),
(gunzip_anat, coregistration, [('out_file', 'target')]),
(remove_temporary_files, extract_first, [('files', 'in_file')]),
(motion_correction, extract_first, [('realigned_unwarped_files', 'in_file')]),
(extract_first, coregistration, [('roi_file', 'source')]),
(selectfiles_preproc, dartel_norm_func, [
('dartel_flow_field', 'flowfield_files'),
Expand Down Expand Up @@ -323,6 +322,10 @@ def get_preprocessing(self):
self.get_preprocessing_sub_workflow()
]

def get_preprocessing_outputs(self):
    """ Return the names of the files the preprocessing is supposed to generate.

    Returns:
    - list of str: file names; currently empty because no preprocessing
      outputs are declared for this pipeline yet.
    """
    # Return an empty list rather than None (the implicit return value of an
    # empty body): callers such as the unit tests do len() on this result,
    # and len(None) raises TypeError.
    return []

def get_parameters_files(
parameters_files, wc2_file, motion_corrected_files, subject_id, working_dir):
"""
Expand Down Expand Up @@ -372,8 +375,8 @@ def get_parameters_files(
data_frame = read_table(file, sep = ' ', header = None)
data_frame['Mean_WM'] = mean_wm[file_id]

new_path = join(result_dir, working_dir, 'parameters_file',
f'parameters_file_sub-{subject_id}_run{"0" + str(file_id + 1)}.tsv')
new_path = join(working_dir, 'parameters_file',
f'parameters_file_sub-{subject_id}_run-{str(file_id + 1).zfill(2)}.tsv')

makedirs(join(working_dir, 'parameters_file'), exist_ok = True)

Expand Down Expand Up @@ -608,6 +611,33 @@ def get_subject_level_analysis(self):

return l1_analysis

def get_subject_level_outputs(self):
    """ Return the names of the files the subject level analysis is supposed to generate. """

    # Every expected file lives in the subject's l1_analysis directory
    subject_dir = join(
        self.directories.output_dir, 'l1_analysis', '_subject_id_{subject_id}')

    # Contrast maps, then the SPM.mat file, then spmT maps
    file_names = [f'con_{contrast_id}.nii' for contrast_id in self.contrast_list]
    file_names.append('SPM.mat')
    file_names += [f'spmT_{contrast_id}.nii' for contrast_id in self.contrast_list]

    # Instantiate each template for every subject
    # (outer loop on files, inner loop on subjects, to keep ordering)
    return [
        join(subject_dir, file_name).format(subject_id = subject_id)
        for file_name in file_names
        for subject_id in self.subject_list
    ]

def get_subset_contrasts(file_list, subject_list, participants_file):
"""
Parameters :
Expand Down Expand Up @@ -781,51 +811,102 @@ def get_group_level_analysis_sub_workflow(self, method):

return l2_analysis

def reorganize_results(result_dir, output_dir, n_sub, team_ID):
    """
    Reorganize the results to analyze them.
    Parameters:
    - result_dir: str, directory where results will be stored
    - output_dir: str, name of the sub-directory for final results
    - n_sub: int, number of subject used for analysis
    - team_ID: str, name of the team ID for which we reorganize files
    """
    from os.path import join as opj
    import os
    import shutil

    # One (group, contrast id) pair per hypothesis, in hypothesis order 1..9
    hypothesis_specs = [
        ('equalIndifference', '01'), ('equalRange', '01'),
        ('equalIndifference', '01'), ('equalRange', '01'),
        ('equalIndifference', '02'), ('equalRange', '02'),
        ('equalIndifference', '02'), ('equalRange', '02'),
        ('groupComp', '02'),
    ]
    hypothesis_dirs = [
        opj(result_dir, output_dir,
            f"l2_analysis_{group}_nsub_{n_sub}", f'_contrast_id_{contrast}')
        for group, contrast in hypothesis_specs
    ]

    # Hypotheses 5 and 6 (indices 4 and 5) use the second stat map,
    # all others use the first one.
    repro_unthresh = []
    repro_thresh = []
    for index, directory in enumerate(hypothesis_dirs):
        if index in [4, 5]:
            repro_unthresh.append(opj(directory, "spmT_0002.nii"))
            repro_thresh.append(opj(directory, "_threshold1", "spmT_0002_thr.nii"))
        else:
            repro_unthresh.append(opj(directory, "spmT_0001.nii"))
            repro_thresh.append(opj(directory, "_threshold0", "spmT_0001_thr.nii"))

    reproduction_dir = opj(result_dir, "NARPS-reproduction")
    if not os.path.isdir(reproduction_dir):
        os.mkdir(reproduction_dir)

    # Copy both map collections under their reproduction file names
    for index, source_file in enumerate(repro_unthresh):
        destination_file = opj(reproduction_dir,
            f"team_{team_ID}_nsub_{n_sub}_hypo{index + 1}_unthresholded.nii")
        shutil.copyfile(source_file, destination_file)

    for index, source_file in enumerate(repro_thresh):
        destination_file = opj(reproduction_dir,
            f"team_{team_ID}_nsub_{n_sub}_hypo{index + 1}_thresholded.nii")
        shutil.copyfile(source_file, destination_file)

    print(f"Results files of team {team_ID} reorganized.")
def get_group_level_outputs(self):
    """ Return all names for the files the group level analysis is supposed to generate. """

    nb_subjects = str(len(self.subject_list))

    # Files expected from the equalRange and equalIndifference analyses
    two_sample_files = [
        'con_0001.nii', 'con_0002.nii', 'mask.nii', 'SPM.mat',
        'spmT_0001.nii', 'spmT_0002.nii',
        join('_threshold0', 'spmT_0001_thr.nii'), join('_threshold1', 'spmT_0002_thr.nii')
    ]
    # Files expected from the groupComp analysis (one-sided contrast only)
    group_comp_files = [
        'con_0001.nii', 'mask.nii', 'SPM.mat', 'spmT_0001.nii',
        join('_threshold0', 'spmT_0001_thr.nii')
    ]

    return_list = []

    # Handle equalRange and equalIndifference
    # (nested loops reproduce itertools.product ordering: contrast, method, file)
    for contrast_id in self.contrast_list:
        for method in ['equalRange', 'equalIndifference']:
            for file in two_sample_files:
                return_list.append(join(
                    self.directories.output_dir,
                    f'l2_analysis_{method}_nsub_{nb_subjects}',
                    f'_contrast_id_{contrast_id}',
                    file
                ))

    # Handle groupComp
    for contrast_id in self.contrast_list:
        for file in group_comp_files:
            return_list.append(join(
                self.directories.output_dir,
                f'l2_analysis_groupComp_nsub_{nb_subjects}',
                f'_contrast_id_{contrast_id}',
                file
            ))

    return return_list

def get_hypotheses_outputs(self):
    """ Return all hypotheses output file names. """
    nb_sub = len(self.subject_list)

    # One (group, contrast id, threshold directory, stat map) tuple per
    # hypothesis, in hypothesis order 1..9
    hypotheses = [
        ('equalIndifference', '0001', '_threshold0', 'spmT_0001'),  # Hypothesis 1
        ('equalRange', '0001', '_threshold0', 'spmT_0001'),         # Hypothesis 2
        ('equalIndifference', '0001', '_threshold0', 'spmT_0001'),  # Hypothesis 3
        ('equalRange', '0001', '_threshold0', 'spmT_0001'),         # Hypothesis 4
        ('equalIndifference', '0002', '_threshold0', 'spmT_0002'),  # Hypothesis 5
        ('equalRange', '0002', '_threshold1', 'spmT_0002'),         # Hypothesis 6
        ('equalIndifference', '0002', '_threshold0', 'spmT_0001'),  # Hypothesis 7
        ('equalRange', '0002', '_threshold1', 'spmT_0001'),         # Hypothesis 8
        ('groupComp', '0002', '_threshold0', 'spmT_0001'),          # Hypothesis 9
    ]

    files = []
    for group, contrast_id, threshold_dir, stat_map in hypotheses:
        analysis_dir = f'l2_analysis_{group}_nsub_{nb_sub}'
        # Thresholded map first, then the unthresholded one
        files.append(join(
            analysis_dir, f'_contrast_id_{contrast_id}', threshold_dir, f'{stat_map}_thr.nii'))
        files.append(join(
            analysis_dir, f'_contrast_id_{contrast_id}', f'{stat_map}.nii'))

    return [join(self.directories.output_dir, f) for f in files]
72 changes: 72 additions & 0 deletions tests/pipelines/test_team_98BT.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
#!/usr/bin/python
# coding: utf-8

""" Tests of the 'narps_open.pipelines.team_98BT' module.
Launch this test with PyTest
Usage:
======
pytest -q test_team_98BT.py
pytest -q test_team_98BT.py -k <selected_test>
"""

from pytest import helpers, mark
from nipype import Workflow

from narps_open.pipelines.team_98BT import PipelineTeam98BT

class TestPipelinesTeam98BT:
    """ A class that contains all the unit tests for the PipelineTeam98BT class."""

    @staticmethod
    @mark.unit_test
    def test_create():
        """ Test the creation of a PipelineTeam98BT object """

        pipeline = PipelineTeam98BT()

        # 1 - check the parameters
        assert pipeline.fwhm == 8.0
        assert pipeline.team_id == '98BT'

        # 2 - check workflows
        preprocessing = pipeline.get_preprocessing()
        assert len(preprocessing) == 2
        assert all(isinstance(workflow, Workflow) for workflow in preprocessing)

        assert pipeline.get_run_level_analysis() is None
        assert isinstance(pipeline.get_subject_level_analysis(), Workflow)

        group_level = pipeline.get_group_level_analysis()
        assert len(group_level) == 3
        assert all(isinstance(workflow, Workflow) for workflow in group_level)

    @staticmethod
    @mark.unit_test
    def test_outputs():
        """ Test the expected outputs of a PipelineTeam98BT object """
        pipeline = PipelineTeam98BT()

        # Expected output counts per subject list, in the order:
        # preprocessing, run level, subject level, group level, hypotheses
        expectations = [
            (['001'], [0, 0, 7, 63, 18]),
            (['001', '002', '003', '004'], [0, 0, 28, 63, 18]),
        ]
        for subjects, counts in expectations:
            pipeline.subject_list = subjects
            assert len(pipeline.get_preprocessing_outputs()) == counts[0]
            assert len(pipeline.get_run_level_outputs()) == counts[1]
            assert len(pipeline.get_subject_level_outputs()) == counts[2]
            assert len(pipeline.get_group_level_outputs()) == counts[3]
            assert len(pipeline.get_hypotheses_outputs()) == counts[4]

    @staticmethod
    @mark.pipeline_test
    def test_execution():
        """ Test the execution of a PipelineTeam98BT and compare results """
        helpers.test_pipeline_evaluation('98BT')

0 comments on commit ec0f8f2

Please sign in to comment.