Preprocessing: removing sizeable files
bclenet committed Oct 24, 2023
1 parent 77515c0 commit 8d907cd
Showing 1 changed file with 136 additions and 144 deletions.
280 changes: 136 additions & 144 deletions narps_open/pipelines/team_08MQ.py
@@ -6,22 +6,24 @@
from os.path import join
from itertools import product

from nipype import Node, Workflow, MapNode
from nipype import Node, Workflow
from nipype.interfaces.utility import IdentityInterface, Function, Merge, Split, Select
from nipype.interfaces.io import SelectFiles, DataSink
from nipype.interfaces.fsl import (
# General usage
FSLCommand,

# Preprocessing
FAST, BET, ErodeImage, PrepareFieldmap, MCFLIRT, SliceTimer,
Threshold, Info, SUSAN, FLIRT, EpiReg, ApplyXFM, ConvertXFM,

Threshold, Info, SUSAN, FLIRT, ApplyXFM, ConvertXFM,
# Analyses
Level1Design, FEATModel, L2Model, FILMGLS,
FLAMEO, Randomise, MultipleRegressDesign, Cluster
FLAMEO, Randomise, MultipleRegressDesign
)
from nipype.interfaces.fsl.utils import Merge as MergeImages
from nipype.algorithms.confounds import CompCor
from nipype.algorithms.modelgen import SpecifyModel
from nipype.interfaces.ants import Registration, WarpTimeSeriesImageMultiTransform

from narps_open.pipelines import Pipeline
from narps_open.data.task import TaskInformation

@@ -37,6 +39,29 @@ def __init__(self):
self.team_id = '08MQ'
self.contrast_list = ['1', '2', '3']

def remove_files(_, files):
"""
This method is used in a Function node to fully remove
files generated by a Node, once they aren't needed anymore.
Parameters:
- _: Node input only used for triggering the Node
- files: str or list, a single filename or a list of filenames to remove
"""
from os import remove

if isinstance(files, str):
files = [files]

try:
for file in files:
remove(file)
except OSError as error:
print(error)
else:
print('The following files were successfully deleted.')
print(files)
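
For illustration, here is how this helper behaves when called directly, outside of any workflow. The class name and the scratch file below are assumptions for the example; the first argument is only a trigger, so any value works:

from narps_open.pipelines.team_08MQ import PipelineTeam08MQ

# Create a throw-away file to delete
with open('scratch.txt', 'w') as file:
    file.write('disposable')

# Unbound call: '_' receives the dummy trigger, 'files' the path(s) to delete
PipelineTeam08MQ.remove_files(None, 'scratch.txt')
# prints: The following files were successfully deleted. / ['scratch.txt']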

def get_preprocessing(self):
""" Return a Nipype workflow describing the prerpocessing part of the pipeline """

@@ -205,6 +230,31 @@ def get_preprocessing(self):
compute_confounds.inputs.merge_method = 'union'
compute_confounds.inputs.repetition_time = TaskInformation()['RepetitionTime']

# Function Nodes remove_files - Remove sizeable files once they aren't needed
remove_func_0 = Node(Function(
function = self.remove_files,
input_names = ['_', 'files'],
output_names = []
), name = 'remove_func_0')

remove_func_1 = Node(Function(
function = self.remove_files,
input_names = ['_', 'files'],
output_names = []
), name = 'remove_func_1')

remove_func_2 = Node(Function(
function = self.remove_files,
input_names = ['_', 'files'],
output_names = []
), name = 'remove_func_2')

remove_func_3 = Node(Function(
function = self.remove_files,
input_names = ['_', 'files'],
output_names = []
), name = 'remove_func_3')
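
The four nodes above are identical except for their names. A sketch of a more compact alternative (not what this commit does) would build them in a loop:

remove_func_nodes = [
    Node(Function(
        function = self.remove_files,
        input_names = ['_', 'files'],
        output_names = []
    ), name = f'remove_func_{index}')
    for index in range(4)
]

Keeping four explicitly named nodes, as done here, arguably makes the workflow graph easier to read in reports and debugging output.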

preprocessing = Workflow(base_dir = self.directories.working_dir, name = 'preprocessing')
preprocessing.connect([
# Inputs
@@ -253,12 +303,24 @@
(normalization_anat, reverse_transform_order, [('forward_transforms', 'inlist')]),
(reverse_transform_order, alignment_func_to_mni, [('out', 'transformation_series')]),
(merge_masks, compute_confounds, [('out', 'mask_files')]), # Masks are in the func space
(slice_time_correction, compute_confounds, [('slice_time_corrected_file', 'realigned_file')]),
(slice_time_correction, compute_confounds, [
('slice_time_corrected_file', 'realigned_file')
]),

# Outputs of preprocessing
(motion_correction, data_sink, [('par_file', 'preprocessing.@par_file')]),
(compute_confounds, data_sink, [('components_file', 'preprocessing.@components_file')]),
(alignment_func_to_mni, data_sink, [('output_image', 'preprocessing.@output_image')])
(alignment_func_to_mni, data_sink, [('output_image', 'preprocessing.@output_image')]),

# File removals
(motion_correction, remove_func_0, [('out_file', 'files')]),
(slice_time_correction, remove_func_0, [('slice_time_corrected_file', '_')]),
(slice_time_correction, remove_func_1, [('slice_time_corrected_file', 'files')]),
(smoothing, remove_func_1, [('smoothed_file', '_')]),
(smoothing, remove_func_2, [('smoothed_file', 'files')]),
(alignment_func_to_anat, remove_func_2, [('out_file', '_')]),
(alignment_func_to_anat, remove_func_3, [('out_file', 'files')]),
(alignment_func_to_mni, remove_func_3, [('output_image', '_')])
])

return preprocessing
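
These connections implement a trigger pattern: each remove_func node receives the file to delete through its 'files' input, and a dummy value through its '_' input from the node that consumes that file, so Nipype only schedules the deletion after the consumer has finished. A self-contained sketch of the same pattern, with hypothetical node names and assuming a standard Nipype install:

from nipype import Node, Workflow
from nipype.interfaces.utility import Function

def produce():
    # Write a throw-away file and return its absolute path
    from tempfile import mkstemp
    from os import close, write
    handle, path = mkstemp(suffix = '.txt')
    write(handle, b'disposable')
    close(handle)
    return path

def consume(in_file):
    # Read the file; the returned content doubles as the removal trigger
    with open(in_file) as file:
        return file.read()

def remove_files(_, files):
    # Same logic as the pipeline helper above
    from os import remove
    if isinstance(files, str):
        files = [files]
    for file in files:
        remove(file)

producer = Node(Function(
    function = produce, input_names = [], output_names = ['out_file']),
    name = 'producer')
consumer = Node(Function(
    function = consume, input_names = ['in_file'], output_names = ['content']),
    name = 'consumer')
remover = Node(Function(
    function = remove_files, input_names = ['_', 'files'], output_names = []),
    name = 'remover')

demo = Workflow(base_dir = '/tmp', name = 'removal_demo')
demo.connect([
    (producer, consumer, [('out_file', 'in_file')]),
    (producer, remover, [('out_file', 'files')]),
    (consumer, remover, [('content', '_')])  # deletion waits for the consumer
])
demo.run()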
@@ -840,15 +902,14 @@ def get_group_level_analysis_sub_workflow(self, method):
estimate_model.inputs.run_mode = 'ols' # Ordinary least squares
estimate_model.inputs.mask_file = Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')

# Cluster Node -
cluster = MapNode(Cluster(
threshold = 3.1,
out_threshold_file = True
),
name = 'cluster',
iterfield = ['in_file', 'cope_file'],
synchronize = True
)
# Randomise Node -
randomise = Node(Randomise(), name = 'randomise')
randomise.inputs.num_perm = 10000
randomise.inputs.tfce = True
randomise.inputs.vox_p_values = True
randomise.inputs.c_thresh = 0.05
randomise.inputs.tfce_E = 0.01
randomise.inputs.mask = Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')
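
For reference, the FSL command these inputs translate to can be previewed from the interface itself, assuming a working FSL/Nipype setup; the file names below are hypothetical placeholders:

from nipype.interfaces.fsl import Randomise

preview = Randomise()
preview.inputs.in_file = 'copes_merged.nii.gz'  # hypothetical 4D copes image
preview.inputs.mask = 'mask.nii.gz'
preview.inputs.design_mat = 'design.mat'
preview.inputs.tcon = 'design.con'
preview.inputs.num_perm = 10000
preview.inputs.tfce = True
preview.inputs.vox_p_values = True
print(preview.cmdline)
# roughly: randomise -i copes_merged.nii.gz -o "randomise" -d design.mat
#          -t design.con -m mask.nii.gz -n 10000 -T -x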

# Compute the number of participants used to do the analysis
nb_subjects = len(self.subject_list)
@@ -858,181 +919,112 @@
base_dir = self.directories.working_dir,
name = f'group_level_analysis_{method}_nsub_{nb_subjects}'
)
group_level_analysis.connect(
[
(
info_source,
select_files,
[('contrast_id', 'contrast_id')],
),
(
info_source,
contrasts,
[('subject_list', 'subject_ids')],
),
(
select_files,
contrasts,
[
('cope', 'copes'),
('varcope', 'varcopes'),
('participants', 'participants_file'),
],
),
(
contrasts,
regressors,
[
('equalRange_id', 'equalRange_id'),
('equalIndifference_id', 'equalIndifference_id')
]
),
(
regressors,
specify_model,
[('regressors', 'regressors')]
)
]
)

group_level_analysis.connect([
(info_source, select_files, [('contrast_id', 'contrast_id')]),
(info_source, contrasts, [('subject_list', 'subject_ids')]),
(select_files, contrasts, [
('cope', 'copes'),
('varcope', 'varcopes'),
('participants', 'participants_file'),
]),
(contrasts, regressors, [
('equalRange_id', 'equalRange_id'),
('equalIndifference_id', 'equalIndifference_id')
]),
(regressors, specify_model, [('regressors', 'regressors')])
])

if method in ('equalRange', 'equalIndifference'):
contrasts = [('Group', 'T', ['mean'], [1]), ('Group', 'T', ['mean'], [-1])]

if method == 'equalIndifference':
group_level_analysis.connect([
(
contrasts,
merge_copes,
[('copes_equalIndifference', 'in_files')]
),
(
contrasts,
merge_varcopes,
[('varcopes_equalIndifference', 'in_files')]
)
(contrasts, merge_copes, [('copes_equalIndifference', 'in_files')]),
(contrasts, merge_varcopes, [('varcopes_equalIndifference', 'in_files')])
])

elif method == 'equalRange':
group_level_analysis.connect([
(
contrasts,
merge_copes,
[('copes_equalRange', 'in_files')]
),
(
contrasts,
merge_varcopes,
[('varcopes_equalRange', 'in_files')]
)
(contrasts, merge_copes, [('copes_equalRange', 'in_files')]),
(contrasts, merge_varcopes, [('varcopes_equalRange', 'in_files')])
])


elif method == 'groupComp':
contrasts = [
('Eq range vs Eq indiff in loss', 'T', ['Group_{1}', 'Group_{2}'], [1, -1])
]

group_level_analysis.connect([
(
select_files,
merge_copes,
[('cope', 'in_files')]
),
(
select_files,
merge_varcopes,
[('varcope', 'in_files')]
)
(select_files, merge_copes, [('cope', 'in_files')]),
(select_files, merge_varcopes, [('varcope', 'in_files')])
])

group_level_analysis.connect([
(
merge_copes,
estimate_model,
[('merged_file', 'cope_file')]
),
(
merge_varcopes,
estimate_model,
[('merged_file', 'var_cope_file')]
),
(
specify_model,
estimate_model,
[
('design_mat', 'design_file'),
('design_con', 't_con_file'),
('design_grp', 'cov_split_file')
]
),
(
estimate_model,
cluster,
[
('zstats', 'in_file'),
('copes', 'cope_file')
]
),
(
estimate_model,
data_sink,
[
('zstats', f"group_level_analysis_{method}_nsub_{nb_subjects}.@zstats"),
('tstats', f"group_level_analysis_{method}_nsub_{nb_subjects}.@tstats")
]
),
(
cluster,
data_sink,
[('threshold_file', f"group_level_analysis_{method}_nsub_{nb_subjects}.@thresh")]
)
(merge_copes, estimate_model, [('merged_file', 'cope_file')]),
(merge_varcopes, estimate_model, [('merged_file', 'var_cope_file')]),
(specify_model, estimate_model, [
('design_mat', 'design_file'),
('design_con', 't_con_file'),
('design_grp', 'cov_split_file')
]),
(merge_copes, randomise, [('merged_file', 'in_file')]),
(specify_model, randomise, [
('design_mat', 'design_mat'),
('design_con', 'tcon')
]),
(randomise, data_sink, [
('t_corrected_p_files',
f'group_level_analysis_{method}_nsub_{nb_subjects}.@tcorpfile'),
('tstat_files', f'group_level_analysis_{method}_nsub_{nb_subjects}.@tstat')
]),
(estimate_model, data_sink, [
('zstats', f'group_level_analysis_{method}_nsub_{nb_subjects}.@zstats'),
('tstats', f'group_level_analysis_{method}_nsub_{nb_subjects}.@tstats')
])
])


# [INFO] Here we simply return the created workflow
return group_level_analysis
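
The contrast tuples used above follow Nipype's (name, test, condition_names, weights) convention, where condition names must match the keys of the regressors dictionary fed to MultipleRegressDesign. A minimal sketch with made-up group membership vectors (MultipleRegressDesign writes the design files itself, without calling an FSL binary):

from nipype.interfaces.fsl import MultipleRegressDesign

design = MultipleRegressDesign()
design.inputs.regressors = dict(
    equalRange = [1, 1, 0, 0],         # hypothetical membership vectors
    equalIndifference = [0, 0, 1, 1]
)
design.inputs.contrasts = [
    ('equalRange vs equalIndifference', 'T',
        ['equalRange', 'equalIndifference'], [1, -1])
]
design.run()  # writes design.mat, design.con and design.grp in the current directory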

def get_hypotheses_outputs(self):
""" Return the names of the files used by the team to answer the hypotheses of NARPS. """

nb_sub = len(self.subject_list)
files = [
join(f'l3_analysis_equalIndifference_nsub_{nb_sub}',
join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
'_contrast_id_pgain', 'randomise_tfce_corrp_tstat1.nii.gz'),
join(f'l3_analysis_equalIndifference_nsub_{nb_sub}',
join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
'_contrast_id_pgain', 'zstat1.nii.gz'),
join(f'l3_analysis_equalRange_nsub_{nb_sub}',
join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
'_contrast_id_pgain', 'randomise_tfce_corrp_tstat1.nii.gz'),
join(f'l3_analysis_equalRange_nsub_{nb_sub}',
join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
'_contrast_id_pgain', 'zstat1.nii.gz'),
join(f'l3_analysis_equalIndifference_nsub_{nb_sub}',
join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
'_contrast_id_pgain', 'randomise_tfce_corrp_tstat1.nii.gz'),
join(f'l3_analysis_equalIndifference_nsub_{nb_sub}',
join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
'_contrast_id_pgain', 'zstat1.nii.gz'),
join(f'l3_analysis_equalRange_nsub_{nb_sub}',
join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
'_contrast_id_pgain', 'randomise_tfce_corrp_tstat1.nii.gz'),
join(f'l3_analysis_equalRange_nsub_{nb_sub}',
join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
'_contrast_id_pgain', 'zstat1.nii.gz'),
join(f'l3_analysis_equalIndifference_nsub_{nb_sub}',
join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
'_contrast_id_ploss', 'randomise_tfce_corrp_tstat2.nii.gz'),
join(f'l3_analysis_equalIndifference_nsub_{nb_sub}',
join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
'_contrast_id_ploss', 'zstat2.nii.gz'),
join(f'l3_analysis_equalRange_nsub_{nb_sub}',
join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
'_contrast_id_ploss', 'randomise_tfce_corrp_tstat2.nii.gz'),
join(f'l3_analysis_equalRange_nsub_{nb_sub}',
join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
'_contrast_id_ploss', 'zstat2.nii.gz'),
join(f'l3_analysis_equalIndifference_nsub_{nb_sub}',
join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
'_contrast_id_ploss', 'randomise_tfce_corrp_tstat1.nii.gz'),
join(f'l3_analysis_equalIndifference_nsub_{nb_sub}',
join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
'_contrast_id_ploss', 'zstat1.nii.gz'),
join(f'l3_analysis_equalRange_nsub_{nb_sub}',
join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
'_contrast_id_ploss', 'randomise_tfce_corrp_tstat1.nii.gz'),
join(f'l3_analysis_equalRange_nsub_{nb_sub}',
join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
'_contrast_id_ploss', 'zstat1.nii.gz'),
join(f'l3_analysis_groupComp_nsub_{nb_sub}',
join(f'group_level_analysis_groupComp_nsub_{nb_sub}',
'_contrast_id_ploss', 'randomise_tfce_corrp_tstat1.nii.gz'),
join(f'l3_analysis_groupComp_nsub_{nb_sub}',
join(f'group_level_analysis_groupComp_nsub_{nb_sub}',
'_contrast_id_ploss', 'zstat1.nii.gz')
]
return [join(self.directories.output_dir, f) for f in files]
