From 46c67a3a0193de253425290b1cc014304a5468be Mon Sep 17 00:00:00 2001
From: Oscar Esteban
Date: Tue, 19 Mar 2024 19:46:58 +0100
Subject: [PATCH 1/3] sty: configure and run ruff

---
 pyproject.toml | 45 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 45 insertions(+)

diff --git a/pyproject.toml b/pyproject.toml
index ebfd20d2..22071fc9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -217,3 +217,48 @@ ignore-words-list = 'nd,mapp,reson'
 skip = """
 ./.git,*.pdf,*.svg,*.min.js,*.ipynb,ORIGINAL_LICENSE,\
 ./docs/source/_static/example_anatreport.html"""
+
+[tool.ruff]
+line-length = 99
+
+[tool.ruff.lint]
+extend-select = [
+  "F",
+  "E",
+  "W",
+  "I",
+  "UP",
+  "YTT",
+  "S",
+  "BLE",
+  "B",
+  "A",
+  # "CPY",
+  "C4",
+  "DTZ",
+  "T10",
+  # "EM",
+  "EXE",
+  "FA",
+  "ISC",
+  "ICN",
+  "PT",
+  "Q",
+]
+extend-ignore = [
+  "S311",  # We are not using random for cryptographic purposes
+  "ISC001",
+  "S603",
+]
+
+[tool.ruff.lint.flake8-quotes]
+inline-quotes = "single"
+
+[tool.ruff.lint.extend-per-file-ignores]
+"*/test_*.py" = ["S101"]
+"mriqc/utils/debug.py" = ["A002", "T100"]
+"docs/conf.py" = ["A001"]
+"docs/sphinxext/github_link.py" = ["BLE001"]
+
+[tool.ruff.format]
+quote-style = "single"

From 9876ac4648de03b2fb493ded74f52761169f2570 Mon Sep 17 00:00:00 2001
From: Oscar Esteban
Date: Tue, 19 Mar 2024 19:47:53 +0100
Subject: [PATCH 2/3] sty: automatic changes by ruff

---
 mriqc/__init__.py                        |   6 +-
 mriqc/__main__.py                        |   7 +-
 mriqc/_warnings.py                       |   6 +-
 mriqc/bin/abide2bids.py                  |  79 ++--
 mriqc/bin/dfcheck.py                     |  53 +--
 mriqc/bin/fs2gif.py                      | 184 ++++----
 mriqc/bin/labeler.py                     |  38 +-
 mriqc/bin/messages.py                    |  38 +-
 mriqc/bin/mriqcwebapi_test.py            |  28 +-
 mriqc/bin/nib_hash.py                    |   7 +-
 mriqc/bin/subject_wrangler.py            | 106 ++---
 mriqc/cli/parser.py                      | 381 ++++++++---------
 mriqc/cli/run.py                         |  52 +--
 mriqc/cli/version.py                     |  15 +-
 mriqc/cli/workflow.py                    |  11 +-
 mriqc/config.py                          | 207 ++++-----
 mriqc/conftest.py                        |  47 +-
 mriqc/data/config.py                     |   4 +-
 mriqc/engine/plugin.py                   |  78 ++--
 mriqc/instrumentation/__main__.py        |  18 +-
 mriqc/instrumentation/resources.py       |  56 +--
 mriqc/instrumentation/viz.py             |  44 +-
 mriqc/interfaces/__init__.py             |  28 +-
 mriqc/interfaces/anatomical.py           | 251 +++++------
 mriqc/interfaces/bids.py                 |  65 +--
 mriqc/interfaces/common/conform_image.py |  23 +-
 mriqc/interfaces/common/ensure_size.py   |  41 +-
 mriqc/interfaces/datalad.py              |  36 +-
 mriqc/interfaces/diffusion.py            | 239 +++++------
 mriqc/interfaces/functional.py           | 306 ++++++-------
 mriqc/interfaces/reports.py              |  37 +-
 mriqc/interfaces/synthstrip.py           |  41 +-
 mriqc/interfaces/transitional.py         |  28 +-
 mriqc/interfaces/webapi.py               | 193 ++++-----
 mriqc/messages.py                        |  36 +-
 mriqc/qc/anatomical.py                   |  34 +-
 mriqc/qc/functional.py                   |  16 +-
 mriqc/qc/tests/test_anatomical.py        |  10 +-
 mriqc/reports/group.py                   | 263 ++++++------
 mriqc/reports/individual.py              |  69 +--
 mriqc/synthstrip/__main__.py             |   8 +-
 mriqc/synthstrip/cli.py                  |  76 ++--
 mriqc/synthstrip/model.py                |  18 +-
 mriqc/testing.py                         |  10 +-
 mriqc/tests/test_config.py               |  28 +-
 mriqc/tests/test_reports.py              |  33 +-
 mriqc/utils/bids.py                      |  78 ++--
 mriqc/utils/debug.py                     |   2 +-
 mriqc/utils/misc.py                      |  86 ++--
 mriqc/utils/telemetry.py                 |   2 +-
 mriqc/workflows/__init__.py              |   4 +-
 mriqc/workflows/anatomical/base.py       | 468 ++++++++++----------
 mriqc/workflows/anatomical/output.py     | 199 ++++-----
 mriqc/workflows/core.py                  |  11 +-
 mriqc/workflows/diffusion/base.py        | 336 +++++++--------
 mriqc/workflows/diffusion/output.py      | 330 +++++++-------
 mriqc/workflows/functional/base.py       | 518 +++++++++++------------
mriqc/workflows/functional/output.py | 308 +++++++------- mriqc/workflows/shared.py | 43 +- mriqc/workflows/utils.py | 44 +- 60 files changed, 2913 insertions(+), 2870 deletions(-) diff --git a/mriqc/__init__.py b/mriqc/__init__.py index 2c7889d6..663b39cc 100644 --- a/mriqc/__init__.py +++ b/mriqc/__init__.py @@ -27,6 +27,6 @@ """ from mriqc._version import __version__ -__copyright__ = "Copyright 2022, The NiPreps Developers" -__download__ = f"https://github.com/nipreps/mriqc/archive/{__version__}.tar.gz" -__all__ = ["__version__", "__copyright__", "__download__"] +__copyright__ = 'Copyright 2022, The NiPreps Developers' +__download__ = f'https://github.com/nipreps/mriqc/archive/{__version__}.tar.gz' +__all__ = ['__version__', '__copyright__', '__download__'] diff --git a/mriqc/__main__.py b/mriqc/__main__.py index 57b233f3..14a94a0e 100644 --- a/mriqc/__main__.py +++ b/mriqc/__main__.py @@ -22,11 +22,12 @@ # from .cli.run import main -if __name__ == "__main__": +if __name__ == '__main__': import sys + from . import __name__ as module # `python -m ` typically displays the command as __main__.py - if "__main__.py" in sys.argv[0]: - sys.argv[0] = f"{sys.executable} -m {module}" + if '__main__.py' in sys.argv[0]: + sys.argv[0] = f'{sys.executable} -m {module}' main() diff --git a/mriqc/_warnings.py b/mriqc/_warnings.py index 9aad8070..f858f749 100644 --- a/mriqc/_warnings.py +++ b/mriqc/_warnings.py @@ -24,7 +24,7 @@ import logging import warnings -_wlog = logging.getLogger("py.warnings") +_wlog = logging.getLogger('py.warnings') _wlog.addHandler(logging.NullHandler()) @@ -32,9 +32,9 @@ def _warn(message, category=None, stacklevel=1, source=None): """Redefine the warning function.""" if category is not None: category = type(category).__name__ - category = category.replace("type", "WARNING") + category = category.replace('type', 'WARNING') - logging.getLogger("py.warnings").warning(f"{category or 'WARNING'}: {message}") + logging.getLogger('py.warnings').warning(f"{category or 'WARNING'}: {message}") def _showwarning(message, category, filename, lineno, file=None, line=None): diff --git a/mriqc/bin/abide2bids.py b/mriqc/bin/abide2bids.py index e3aaa665..9a9d68b1 100644 --- a/mriqc/bin/abide2bids.py +++ b/mriqc/bin/abide2bids.py @@ -34,42 +34,43 @@ from xml.etree import ElementTree as et import numpy as np + from mriqc.bin import messages def main(): """Entry point.""" parser = ArgumentParser( - description="ABIDE2BIDS downloader.", + description='ABIDE2BIDS downloader.', formatter_class=RawTextHelpFormatter, ) - g_input = parser.add_argument_group("Inputs") - g_input.add_argument("-i", "--input-abide-catalog", action="store", required=True) + g_input = parser.add_argument_group('Inputs') + g_input.add_argument('-i', '--input-abide-catalog', action='store', required=True) g_input.add_argument( - "-n", "--dataset-name", action="store", default="ABIDE Dataset" + '-n', '--dataset-name', action='store', default='ABIDE Dataset' ) g_input.add_argument( - "-u", "--nitrc-user", action="store", default=os.getenv("NITRC_USER") + '-u', '--nitrc-user', action='store', default=os.getenv('NITRC_USER') ) g_input.add_argument( - "-p", - "--nitrc-password", - action="store", - default=os.getenv("NITRC_PASSWORD"), + '-p', + '--nitrc-password', + action='store', + default=os.getenv('NITRC_PASSWORD'), ) - g_outputs = parser.add_argument_group("Outputs") - g_outputs.add_argument("-o", "--output-dir", action="store", default="ABIDE-BIDS") + g_outputs = parser.add_argument_group('Outputs') + 
g_outputs.add_argument('-o', '--output-dir', action='store', default='ABIDE-BIDS') opts = parser.parse_args() if opts.nitrc_user is None or opts.nitrc_password is None: - raise RuntimeError("NITRC user and password are required") + raise RuntimeError('NITRC user and password are required') dataset_desc = { - "BIDSVersion": "1.0.0rc3", - "License": "CC Attribution-NonCommercial-ShareAlike 3.0 Unported", - "Name": opts.dataset_name, + 'BIDSVersion': '1.0.0rc3', + 'License': 'CC Attribution-NonCommercial-ShareAlike 3.0 Unported', + 'Name': opts.dataset_name, } out_dir = op.abspath(opts.output_dir) @@ -79,22 +80,22 @@ def main(): if exc.errno != errno.EEXIST: raise exc - with open(op.join(out_dir, "dataset_description.json"), "w") as dfile: + with open(op.join(out_dir, 'dataset_description.json'), 'w') as dfile: json.dump(dataset_desc, dfile) catalog = et.parse(opts.input_abide_catalog).getroot() - urls = [el.get("URI") for el in catalog.iter() if el.get("URI") is not None] + urls = [el.get('URI') for el in catalog.iter() if el.get('URI') is not None] pool = Pool() args_list = [(url, opts.nitrc_user, opts.nitrc_password, out_dir) for url in urls] res = pool.map(fetch, args_list) - tsv_data = np.array([("subject_id", "site_name")] + res) + tsv_data = np.array([('subject_id', 'site_name')] + res) np.savetxt( - op.join(out_dir, "participants.tsv"), + op.join(out_dir, 'participants.tsv'), tsv_data, - fmt="%s", - delimiter="\t", + fmt='%s', + delimiter='\t', ) @@ -124,46 +125,46 @@ def fetch(args: Tuple[str, str, str, str]) -> Tuple[str, str]: else: out_dir = op.abspath(out_dir) - pkg_id = [u[9:] for u in url.split("/") if u.startswith("NITRC_IR_")][0] - sub_file = op.join(tmpdir, "%s.zip" % pkg_id) + pkg_id = [u[9:] for u in url.split('/') if u.startswith('NITRC_IR_')][0] + sub_file = op.join(tmpdir, '%s.zip' % pkg_id) - cmd = ["curl", "-s", "-u", f"{user}:{password}", "-o", sub_file, url] + cmd = ['curl', '-s', '-u', f'{user}:{password}', '-o', sub_file, url] sp.check_call(cmd) - sp.check_call(["unzip", "-qq", "-d", tmpdir, "-u", sub_file]) + sp.check_call(['unzip', '-qq', '-d', tmpdir, '-u', sub_file]) - abide_root = op.join(tmpdir, "ABIDE") + abide_root = op.join(tmpdir, 'ABIDE') files = [] for root, path, fname in os.walk(abide_root): - if fname and (fname[0].endswith("nii") or fname[0].endswith("nii.gz")): + if fname and (fname[0].endswith('nii') or fname[0].endswith('nii.gz')): if path: root = op.join(root, path[0]) files.append(op.join(root, fname[0])) index = len(abide_root) + 1 - site_name, sub_str = files[0][index:].split("/")[0].split("_") - subject_id = "sub-" + sub_str + site_name, sub_str = files[0][index:].split('/')[0].split('_') + subject_id = 'sub-' + sub_str for i in files: - ext = ".nii.gz" - if i.endswith(".nii"): - ext = ".nii" - if "mprage" in i: - bids_dir = op.join(out_dir, subject_id, "anat") + ext = '.nii.gz' + if i.endswith('.nii'): + ext = '.nii' + if 'mprage' in i: + bids_dir = op.join(out_dir, subject_id, 'anat') try: os.makedirs(bids_dir) except OSError as exc: if exc.errno != errno.EEXIST: raise exc - shutil.copy(i, op.join(bids_dir, subject_id + "_T1w" + ext)) + shutil.copy(i, op.join(bids_dir, subject_id + '_T1w' + ext)) - if "rest" in i: - bids_dir = op.join(out_dir, subject_id, "func") + if 'rest' in i: + bids_dir = op.join(out_dir, subject_id, 'func') try: os.makedirs(bids_dir) except OSError as exc: if exc.errno != errno.EEXIST: raise exc - shutil.copy(i, op.join(bids_dir, subject_id + "_rest_bold" + ext)) + shutil.copy(i, op.join(bids_dir, subject_id + 
'_rest_bold' + ext)) shutil.rmtree(tmpdir, ignore_errors=True, onerror=_myerror) @@ -187,5 +188,5 @@ def _myerror(message: str): print(warning) -if __name__ == "__main__": +if __name__ == '__main__': main() diff --git a/mriqc/bin/dfcheck.py b/mriqc/bin/dfcheck.py index bca0595c..8c175d3a 100644 --- a/mriqc/bin/dfcheck.py +++ b/mriqc/bin/dfcheck.py @@ -29,6 +29,7 @@ import numpy as np import pandas as pd + from mriqc.bin import messages from mriqc.utils.misc import BIDS_COMP @@ -37,7 +38,7 @@ def read_iqms(feat_file): """Read in a features table.""" feat_file = Path(feat_file) - if feat_file.suffix == ".csv": + if feat_file.suffix == '.csv': x_df = pd.read_csv( feat_file, index_col=False, dtype={col: str for col in BIDS_COMP} ) @@ -46,7 +47,7 @@ def read_iqms(feat_file): bids_comps_present = [bit for bit in BIDS_COMP if bit in bids_comps_present] x_df = x_df.sort_values(by=bids_comps_present) # Remove sub- prefix in subject_id - x_df.subject_id = x_df.subject_id.str.lstrip("sub-") + x_df.subject_id = x_df.subject_id.str.lstrip('sub-') # Remove columns that are not IQMs feat_names = list(x_df._get_numeric_data().columns.ravel()) @@ -56,18 +57,18 @@ def read_iqms(feat_file): except ValueError: pass else: - bids_comps_present = ["subject_id"] + bids_comps_present = ['subject_id'] x_df = pd.read_csv( - feat_file, index_col=False, sep="\t", dtype={"bids_name": str} + feat_file, index_col=False, sep='\t', dtype={'bids_name': str} ) - x_df = x_df.sort_values(by=["bids_name"]) - x_df["subject_id"] = x_df.bids_name.str.lstrip("sub-") - x_df = x_df.drop(columns=["bids_name"]) - x_df.subject_id = ["_".join(v.split("_")[:-1]) for v in x_df.subject_id.ravel()] + x_df = x_df.sort_values(by=['bids_name']) + x_df['subject_id'] = x_df.bids_name.str.lstrip('sub-') + x_df = x_df.drop(columns=['bids_name']) + x_df.subject_id = ['_'.join(v.split('_')[:-1]) for v in x_df.subject_id.ravel()] feat_names = list(x_df._get_numeric_data().columns.ravel()) for col in feat_names: - if col.startswith(("size_", "spacing_", "Unnamed")): + if col.startswith(('size_', 'spacing_', 'Unnamed')): feat_names.remove(col) return x_df, feat_names, bids_comps_present @@ -76,31 +77,31 @@ def read_iqms(feat_file): def main(): """Entry point.""" parser = ArgumentParser( - description="Compare two pandas dataframes.", + description='Compare two pandas dataframes.', formatter_class=RawTextHelpFormatter, ) - g_input = parser.add_argument_group("Inputs") + g_input = parser.add_argument_group('Inputs') g_input.add_argument( - "-i", - "--input-csv", - action="store", + '-i', + '--input-csv', + action='store', type=Path, required=True, - help="input data frame", + help='input data frame', ) g_input.add_argument( - "-r", - "--reference-csv", - action="store", + '-r', + '--reference-csv', + action='store', type=Path, required=True, - help="reference dataframe", + help='reference dataframe', ) g_input.add_argument( - "--tolerance", + '--tolerance', type=float, default=1.0e-5, - help="relative tolerance for comparison", + help='relative tolerance for comparison', ) opts = parser.parse_args() @@ -146,13 +147,13 @@ def main(): changed_to = tst_df[ref_names].values[difference_locations] cols = [ref_names[v] for v in difference_locations[1]] bids_df = ref_df.loc[difference_locations[0], ref_bids].reset_index() - chng_df = pd.DataFrame({"iqm": cols, "from": changed_from, "to": changed_to}) + chng_df = pd.DataFrame({'iqm': cols, 'from': changed_from, 'to': changed_to}) table = pd.concat([bids_df, chng_df], axis=1) - print(table[ref_bids + ["iqm", 
"from", "to"]].to_string(index=False)) + print(table[ref_bids + ['iqm', 'from', 'to']].to_string(index=False)) corr = pd.DataFrame() - corr["iqms"] = ref_names - corr["cc"] = [ + corr['iqms'] = ref_names + corr['cc'] = [ float( np.corrcoef( ref_df[[var]].values.ravel(), @@ -174,5 +175,5 @@ def main(): sys.exit(0) -if __name__ == "__main__": +if __name__ == '__main__': main() diff --git a/mriqc/bin/fs2gif.py b/mriqc/bin/fs2gif.py index 492ac238..0ffdd37b 100644 --- a/mriqc/bin/fs2gif.py +++ b/mriqc/bin/fs2gif.py @@ -39,19 +39,19 @@ def main(): """Entry point""" parser = ArgumentParser( - description="Batch export freesurfer results to animated gifs.", + description='Batch export freesurfer results to animated gifs.', formatter_class=RawTextHelpFormatter, ) - g_input = parser.add_argument_group("Inputs") - g_input.add_argument("-s", "--subject-id", action="store") - g_input.add_argument("-t", "--temp-dir", action="store") - g_input.add_argument("--keep-temp", action="store_true", default=False) - g_input.add_argument("--zoom", action="store_true", default=False) - g_input.add_argument("--hist-eq", action="store_true", default=False) - g_input.add_argument("--use-xvfb", action="store_true", default=False) + g_input = parser.add_argument_group('Inputs') + g_input.add_argument('-s', '--subject-id', action='store') + g_input.add_argument('-t', '--temp-dir', action='store') + g_input.add_argument('--keep-temp', action='store_true', default=False) + g_input.add_argument('--zoom', action='store_true', default=False) + g_input.add_argument('--hist-eq', action='store_true', default=False) + g_input.add_argument('--use-xvfb', action='store_true', default=False) - g_outputs = parser.add_argument_group("Outputs") - g_outputs.add_argument("-o", "--output-dir", action="store", default="fs2gif") + g_outputs = parser.add_argument_group('Outputs') + g_outputs.add_argument('-o', '--output-dir', action='store', default='fs2gif') opts = parser.parse_args() @@ -72,7 +72,7 @@ def main(): if exc.errno != EEXIST: raise exc - subjects_dir = os.getenv("SUBJECTS_DIR", op.abspath("subjects")) + subjects_dir = os.getenv('SUBJECTS_DIR', op.abspath('subjects')) subject_list = [opts.subject_id] if opts.subject_id is None: subject_list = [ @@ -81,9 +81,9 @@ def main(): if op.isdir(os.path.join(subjects_dir, name)) ] environ = os.environ.copy() - environ["SUBJECTS_DIR"] = subjects_dir + environ['SUBJECTS_DIR'] = subjects_dir if opts.use_xvfb: - environ["doublebufferflag"] = 1 + environ['doublebufferflag'] = 1 # tcl_file = pkgr.resource_filename('mriqc', 'data/fsexport.tcl') tcl_contents = """ @@ -103,7 +103,7 @@ def main(): if exc.errno != EEXIST: raise exc - data = nb.load(op.join(sub_path, "mri", "norm.mgz")).get_fdata() + data = nb.load(op.join(sub_path, 'mri', 'norm.mgz')).get_fdata() data[data > 0] = 1 # Compute brain bounding box @@ -113,142 +113,138 @@ def main(): center = np.average([bbox_min, bbox_max], axis=0) if opts.hist_eq: - ref_file = op.join(tmp_sub, "%s.mgz" % subid) - img = nb.load(op.join(sub_path, "mri", "norm.mgz")) + ref_file = op.join(tmp_sub, '%s.mgz' % subid) + img = nb.load(op.join(sub_path, 'mri', 'norm.mgz')) data = exposure.equalize_adapthist(img.get_fdata(), clip_limit=0.03) nb.MGHImage(data, img.affine, img.header).to_filename(ref_file) if not opts.zoom: # Export tiffs for left hemisphere - tcl_file = op.join(tmp_sub, "%s.tcl" % subid) - with open(tcl_file, "w") as tclfp: + tcl_file = op.join(tmp_sub, '%s.tcl' % subid) + with open(tcl_file, 'w') as tclfp: tclfp.write(tcl_contents) tclfp.write( 
- "for { set slice %d } { $slice < %d } { incr slice } {" + 'for { set slice %d } { $slice < %d } { incr slice } {' % (bbox_min[2], bbox_max[2]) ) - tclfp.write(" SetSlice $slice\n") - tclfp.write(" RedrawScreen\n") + tclfp.write(' SetSlice $slice\n') + tclfp.write(' RedrawScreen\n') tclfp.write( f' SaveTIFF [format "{tmp_sub}/{subid}-' + '%03d.tif" $i]\n' ) - tclfp.write(" incr i\n") - tclfp.write("}\n") - tclfp.write("QuitMedit\n") + tclfp.write(' incr i\n') + tclfp.write('}\n') + tclfp.write('QuitMedit\n') cmd = [ - "tkmedit", + 'tkmedit', subid, - "T1.mgz", - "lh.pial", - "-aux-surface", - "rh.pial", - "-tcl", + 'T1.mgz', + 'lh.pial', + '-aux-surface', + 'rh.pial', + '-tcl', tcl_file, ] if opts.use_xvfb: cmd = _xvfb_run() + cmd - print("Running tkmedit: %s" % " ".join(cmd)) + print('Running tkmedit: %s' % ' '.join(cmd)) sp.call(cmd, env=environ) # Convert to animated gif - print("Stacking coronal slices") + print('Stacking coronal slices') sp.call( [ - "convert", - "-delay", - "10", - "-loop", - "0", - f"{tmp_sub}/{subid}-*.tif", - f"{out_dir}/{subid}.gif", + 'convert', + '-delay', + '10', + '-loop', + '0', + f'{tmp_sub}/{subid}-*.tif', + f'{out_dir}/{subid}.gif', ] ) else: # Export tiffs for left hemisphere - tcl_file = op.join(tmp_sub, "lh-%s.tcl" % subid) - with open(tcl_file, "w") as tclfp: + tcl_file = op.join(tmp_sub, 'lh-%s.tcl' % subid) + with open(tcl_file, 'w') as tclfp: tclfp.write(tcl_contents) - tclfp.write("SetZoomLevel 2") + tclfp.write('SetZoomLevel 2') tclfp.write( - "for { set slice %d } { $slice < %d } { incr slice } {" + 'for { set slice %d } { $slice < %d } { incr slice } {' % (bbox_min[2], bbox_max[2]) ) tclfp.write( - " SetZoomCenter %d %d $slice\n" + ' SetZoomCenter %d %d $slice\n' % (center[0] + 30, center[1] - 10) ) - tclfp.write(" SetSlice $slice\n") - tclfp.write(" RedrawScreen\n") + tclfp.write(' SetSlice $slice\n') + tclfp.write(' RedrawScreen\n') tclfp.write( - ' SaveTIFF [format "{}/{}-lh-%03d.tif" $i]\n'.format( - tmp_sub, subid - ) + f' SaveTIFF [format "{tmp_sub}/{subid}-lh-%03d.tif" $i]\n' ) - tclfp.write(" incr i\n") - tclfp.write("}\n") - tclfp.write("QuitMedit\n") - cmd = ["tkmedit", subid, "norm.mgz", "lh.white", "-tcl", tcl_file] + tclfp.write(' incr i\n') + tclfp.write('}\n') + tclfp.write('QuitMedit\n') + cmd = ['tkmedit', subid, 'norm.mgz', 'lh.white', '-tcl', tcl_file] if opts.use_xvfb: cmd = _xvfb_run() + cmd - print("Running tkmedit: %s" % " ".join(cmd)) + print('Running tkmedit: %s' % ' '.join(cmd)) sp.call(cmd, env=environ) # Convert to animated gif - print("Stacking coronal slices") + print('Stacking coronal slices') # Export tiffs for right hemisphere - tcl_file = op.join(tmp_sub, "rh-%s.tcl" % subid) - with open(tcl_file, "w") as tclfp: + tcl_file = op.join(tmp_sub, 'rh-%s.tcl' % subid) + with open(tcl_file, 'w') as tclfp: tclfp.write(tcl_contents) - tclfp.write("SetZoomLevel 2") + tclfp.write('SetZoomLevel 2') tclfp.write( - "for { set slice %d } { $slice < %d } { incr slice } {" + 'for { set slice %d } { $slice < %d } { incr slice } {' % (bbox_min[2], bbox_max[2]) ) tclfp.write( - " SetZoomCenter %d %d $slice\n" + ' SetZoomCenter %d %d $slice\n' % (center[0] - 30, center[1] - 10) ) - tclfp.write(" SetSlice $slice\n") - tclfp.write(" RedrawScreen\n") + tclfp.write(' SetSlice $slice\n') + tclfp.write(' RedrawScreen\n') tclfp.write( - ' SaveTIFF [format "{}/{}-rh-%03d.tif" $slice]\n'.format( - tmp_sub, subid - ) + f' SaveTIFF [format "{tmp_sub}/{subid}-rh-%03d.tif" $slice]\n' ) - tclfp.write(" incr i\n") - tclfp.write("}\n") - 
tclfp.write("QuitMedit\n") - cmd = ["tkmedit", subid, "norm.mgz", "rh.white", "-tcl", tcl_file] + tclfp.write(' incr i\n') + tclfp.write('}\n') + tclfp.write('QuitMedit\n') + cmd = ['tkmedit', subid, 'norm.mgz', 'rh.white', '-tcl', tcl_file] if opts.use_xvfb: cmd = _xvfb_run() + cmd - print("Running tkmedit: %s" % " ".join(cmd)) + print('Running tkmedit: %s' % ' '.join(cmd)) sp.call(cmd, env=environ) # Convert to animated gif - print("Stacking coronal slices") + print('Stacking coronal slices') sp.call( [ - "convert", - "-delay", - "10", - "-loop", - "0", - f"{tmp_sub}/{subid}-lh-*.tif", - f"{out_dir}/{subid}-lh.gif", + 'convert', + '-delay', + '10', + '-loop', + '0', + f'{tmp_sub}/{subid}-lh-*.tif', + f'{out_dir}/{subid}-lh.gif', ] ) sp.call( [ - "convert", - "-delay", - "10", - "-loop", - "0", - f"{tmp_sub}/{subid}-rh-*.tif", - f"{out_dir}/{subid}-rh.gif", + 'convert', + '-delay', + '10', + '-loop', + '0', + f'{tmp_sub}/{subid}-rh-*.tif', + f'{out_dir}/{subid}-rh.gif', ] ) @@ -256,28 +252,28 @@ def main(): rmtree(tmp_sub, ignore_errors=True, onerror=_myerror) -def _xvfb_run(wait=5, server_args="-screen 0, 1600x1200x24", logs=None): +def _xvfb_run(wait=5, server_args='-screen 0, 1600x1200x24', logs=None): """ Wrap command with xvfb-run. Copied from: https://github.com/VUIIS/seam/blob/1dabd9ca5b1fc7d66ef7d41c34ea8d42d668a484/seam/util.py """ if logs is None: - logs = op.join(mkdtemp(), "fs2gif_xvfb") + logs = op.join(mkdtemp(), 'fs2gif_xvfb') return [ - "xvfb-run", - "-a", # automatically get a free server number - f"-f {logs}.out", - f"-e {logs}.err", - f"--wait={wait:d}", + 'xvfb-run', + '-a', # automatically get a free server number + f'-f {logs}.out', + f'-e {logs}.err', + f'--wait={wait:d}', f'--server-args="{server_args}"', ] def _myerror(msg): - print("WARNING: Error deleting temporal files: %s" % msg) + print('WARNING: Error deleting temporal files: %s' % msg) -if __name__ == "__main__": +if __name__ == '__main__': main() diff --git a/mriqc/bin/labeler.py b/mriqc/bin/labeler.py index 29565fc7..6007d180 100644 --- a/mriqc/bin/labeler.py +++ b/mriqc/bin/labeler.py @@ -38,8 +38,8 @@ def num_rows(data): def main(): """read the input file""" - print("Reading file sinfo.csv") - csvfile = open("sinfo.csv", "rb") + print('Reading file sinfo.csv') + csvfile = open('sinfo.csv', 'rb') csvreader = csv.reader(csvfile) file = list(csvreader) @@ -55,8 +55,8 @@ def main(): hold[j - 1, i - 1] = int(file[i][j]) finished = np.divide(np.round(np.divide(finished, total) * 1000), 10) print(f"Completed: {' '.join(['%g%%' % f for f in finished])}") - print(f"Total: {np.round(np.divide(np.sum(finished), 3))}%") - input("Waiting: [enter]") + print(f'Total: {np.round(np.divide(np.sum(finished), 3))}%') + input('Waiting: [enter]') # file[1:] are all the rows order = range(1, len(file)) @@ -67,32 +67,32 @@ def main(): current = num_rows(file[row]) if current <= 1: # if less than 1, run the row - print("Check participant #" + file[row][0]) - fname = os.getcwd() + "/abide/" + file[row][0] + print('Check participant #' + file[row][0]) + fname = os.getcwd() + '/abide/' + file[row][0] if os.path.isfile(fname): - webbrowser.open("file://" + fname) - quality = input("Quality? [-1/0/1/e/c] ") - if quality == "e": + webbrowser.open('file://' + fname) + quality = input('Quality? 
[-1/0/1/e/c] ') + if quality == 'e': break - if quality == "c": - print("Current comment: " + file[row][4]) - comment = input("Comment: ") + if quality == 'c': + print('Current comment: ' + file[row][4]) + comment = input('Comment: ') if len(comment) > 0: file[row][4] = comment - quality = input("Quality? [-1/0/1/e] ") - if quality == "e": + quality = input('Quality? [-1/0/1/e] ') + if quality == 'e': break file[row][current] = quality else: - print("File does not exist") + print('File does not exist') - print("Writing file sinfo.csv") - outfile = open("sinfo.csv", "wb") + print('Writing file sinfo.csv') + outfile = open('sinfo.csv', 'wb') csvwriter = csv.writer(outfile) csvwriter.writerows(file) - print("Ending") + print('Ending') -if __name__ == "__main__": +if __name__ == '__main__': main() sys.exit(0) diff --git a/mriqc/bin/messages.py b/mriqc/bin/messages.py index 553b659c..f8bb1ce8 100644 --- a/mriqc/bin/messages.py +++ b/mriqc/bin/messages.py @@ -21,34 +21,34 @@ # https://www.nipreps.org/community/licensing/ # ABIDE_SUBJECT_FETCHED = ( - "Successfully processed subject {subject_id} from site {site_name}" + 'Successfully processed subject {subject_id} from site {site_name}' ) -ABIDE_TEMPORAL_WARNING = "WARNING: Error deleting temporal files: {message}" +ABIDE_TEMPORAL_WARNING = 'WARNING: Error deleting temporal files: {message}' BIDS_LABEL_MISSING = ( - "Participant label(s) not found in the BIDS root directory: {label}" + 'Participant label(s) not found in the BIDS root directory: {label}' ) BIDS_GROUP_SIZE = ( - "Group size should be at least 0 (i.e. all participants assigned to same group)." + 'Group size should be at least 0 (i.e. all participants assigned to same group).' ) -CLF_CAPTURED_WARNING = "Captured warning ({category}): {message}" +CLF_CAPTURED_WARNING = 'Captured warning ({category}): {message}' CLF_CLASSIFIER_MISSING = ( - "No training samples were given, and the --load-classifier option {info}." + 'No training samples were given, and the --load-classifier option {info}.' ) -CLF_SAVED_RESULTS = "Results saved as {path}." -CLF_TRAIN_LOAD_ERROR = "Errors ({n_errors}) loading training set: {errors}." -CLF_WRONG_PARAMETER_COUNT = "Wrong number of parameters." -DFCHECK_CSV_CHANGED = "Output CSV file changed one or more values." -DFCHECK_CSV_COLUMNS = "Output CSV file changed number of columns." -DFCHECK_DIFFERENT_BITS = "Dataset has different BIDS bits w.r.t. reference." +CLF_SAVED_RESULTS = 'Results saved as {path}.' +CLF_TRAIN_LOAD_ERROR = 'Errors ({n_errors}) loading training set: {errors}.' +CLF_WRONG_PARAMETER_COUNT = 'Wrong number of parameters.' +DFCHECK_CSV_CHANGED = 'Output CSV file changed one or more values.' +DFCHECK_CSV_COLUMNS = 'Output CSV file changed number of columns.' +DFCHECK_DIFFERENT_BITS = 'Dataset has different BIDS bits w.r.t. reference.' DFCHECK_DIFFERENT_LENGTH = """\ Input datasets have different lengths (input={len_input}, \ reference={len_reference}). """ -DFCHECK_IQMS_CORRELATED = "All IQMs show a Pearson correlation >= 0.95." -DFCHECK_IQMS_UNDER_095 = "IQMs with Pearson correlation < 0.95:\n{iqms}" -HASH_REPORT = "{sha} {file_name}" -PLOT_REPORT_VERSION = "mriqc version:\t{version}" -PLOT_WORK_MISSING = "Work directory of a previous MRIQC run was not found." +DFCHECK_IQMS_CORRELATED = 'All IQMs show a Pearson correlation >= 0.95.' 
+DFCHECK_IQMS_UNDER_095 = 'IQMs with Pearson correlation < 0.95:\n{iqms}' +HASH_REPORT = '{sha} {file_name}' +PLOT_REPORT_VERSION = 'mriqc version:\t{version}' +PLOT_WORK_MISSING = 'Work directory of a previous MRIQC run was not found.' SUBJECT_WRANGLER_DESCRIPTION = """\ BIDS-Apps participants wrangler tool ------------------------------------ @@ -56,5 +56,5 @@ This command arranges the participant labels in groups for computation, and checks that the \ requested participants have the corresponding folder in the bids_dir.\ """ -WEBAPI_GET = "Sending GET to {address}." -WEBAPI_REPORT = "There are {n_records} records in database." +WEBAPI_GET = 'Sending GET to {address}.' +WEBAPI_REPORT = 'There are {n_records} records in database.' diff --git a/mriqc/bin/mriqcwebapi_test.py b/mriqc/bin/mriqcwebapi_test.py index 58659057..2bc334af 100644 --- a/mriqc/bin/mriqcwebapi_test.py +++ b/mriqc/bin/mriqcwebapi_test.py @@ -30,26 +30,26 @@ def get_parser(): from argparse import ArgumentParser, RawTextHelpFormatter parser = ArgumentParser( - description="MRIQCWebAPI: Check entries.", formatter_class=RawTextHelpFormatter + description='MRIQCWebAPI: Check entries.', formatter_class=RawTextHelpFormatter ) parser.add_argument( - "modality", - action="store", - choices=["T1w", "bold"], - help="number of expected items in the database", + 'modality', + action='store', + choices=['T1w', 'bold'], + help='number of expected items in the database', ) parser.add_argument( - "expected", - action="store", + 'expected', + action='store', type=int, - help="number of expected items in the database", + help='number of expected items in the database', ) parser.add_argument( - "--webapi-url", - action="store", - default="https://mriqc.nimh.nih.gov/api/v1/T1w", + '--webapi-url', + action='store', + default='https://mriqc.nimh.nih.gov/api/v1/T1w', type=str, - help="IP address where the MRIQC WebAPI is listening", + help='IP address where the MRIQC WebAPI is listening', ) return parser @@ -66,11 +66,11 @@ def main(): get_log_message = messages.WEBAPI_GET.format(address=opts.webapi_url) MRIQC_LOG.info(get_log_message) response = get(opts.webapi_url).json() - n_records = response["_meta"]["total"] + n_records = response['_meta']['total'] response_log_message = messages.WEBAPI_REPORT.format(n_records=n_records) MRIQC_LOG.info(response_log_message) assert opts.expected == n_records -if __name__ == "__main__": +if __name__ == '__main__': main() diff --git a/mriqc/bin/nib_hash.py b/mriqc/bin/nib_hash.py index d29e7e61..e649880a 100644 --- a/mriqc/bin/nib_hash.py +++ b/mriqc/bin/nib_hash.py @@ -27,6 +27,7 @@ from hashlib import sha1 import nibabel as nb + from mriqc.bin import messages @@ -41,10 +42,10 @@ def get_parser() -> ArgumentParser: """ parser = ArgumentParser( - description="Compare two pandas dataframes.", + description='Compare two pandas dataframes.', formatter_class=RawTextHelpFormatter, ) - parser.add_argument("input_file", action="store", help="input nifti file") + parser.add_argument('input_file', action='store', help='input nifti file') return parser @@ -74,5 +75,5 @@ def main(): print(message) -if __name__ == "__main__": +if __name__ == '__main__': main() diff --git a/mriqc/bin/subject_wrangler.py b/mriqc/bin/subject_wrangler.py index 00d812ea..2634862a 100644 --- a/mriqc/bin/subject_wrangler.py +++ b/mriqc/bin/subject_wrangler.py @@ -30,7 +30,7 @@ from mriqc import __version__ from mriqc.bin import messages -COMMAND = "{exec} {bids_dir} {out_dir} participant --participant_label {labels} {work_dir} {arguments} 
{logfile}" # noqa: E501 +COMMAND = '{exec} {bids_dir} {out_dir} participant --participant_label {labels} {work_dir} {arguments} {logfile}' # noqa: E501 def main(): @@ -41,75 +41,75 @@ def main(): ) parser.add_argument( - "-v", - "--version", - action="version", - version=f"mriqc v{__version__}", + '-v', + '--version', + action='version', + version=f'mriqc v{__version__}', ) parser.add_argument( - "bids_dir", - action="store", - help="The directory with the input dataset formatted according to the BIDS standard.", + 'bids_dir', + action='store', + help='The directory with the input dataset formatted according to the BIDS standard.', ) parser.add_argument( - "output_dir", - action="store", - help="The directory where the output files " - "should be stored. If you are running group level analysis " - "this folder should be prepopulated with the results of the" - "participant level analysis.", + 'output_dir', + action='store', + help='The directory where the output files ' + 'should be stored. If you are running group level analysis ' + 'this folder should be prepopulated with the results of the' + 'participant level analysis.', ) parser.add_argument( - "--participant_label", - "--subject_list", - "-S", - action="store", - help="The label(s) of the participant(s) that should be analyzed. " - "The label corresponds to sub- from the " + '--participant_label', + '--subject_list', + '-S', + action='store', + help='The label(s) of the participant(s) that should be analyzed. ' + 'The label corresponds to sub- from the ' 'BIDS spec (so it does not include "sub-"). If this parameter ' - "is not provided all subjects should be analyzed. Multiple " - "participants can be specified with a space separated list.", - nargs="*", + 'is not provided all subjects should be analyzed. 
Multiple ' + 'participants can be specified with a space separated list.', + nargs='*', ) parser.add_argument( - "--group-size", + '--group-size', default=1, - action="store", + action='store', type=int, - help="Parallelize participants in groups.", + help='Parallelize participants in groups.', ) parser.add_argument( - "--no-randomize", + '--no-randomize', default=False, - action="store_true", - help="Do not randomize participants list before grouping.", + action='store_true', + help='Do not randomize participants list before grouping.', ) parser.add_argument( - "--log-groups", + '--log-groups', default=False, - action="store_true", - help="Append logging output.", + action='store_true', + help='Append logging output.', ) parser.add_argument( - "--multiple-workdir", + '--multiple-workdir', default=False, - action="store_true", - help="Split work directories by jobs.", + action='store_true', + help='Split work directories by jobs.', ) parser.add_argument( - "--bids-app-name", - default="mriqc", - action="store", - help="BIDS app to call.", + '--bids-app-name', + default='mriqc', + action='store', + help='BIDS app to call.', ) - parser.add_argument("--args", default="", action="store", help="Append arguments.") + parser.add_argument('--args', default='', action='store', help='Append arguments.') opts = parser.parse_args() # Build settings dict bids_dir = op.abspath(opts.bids_dir) - subject_dirs = glob.glob(op.join(bids_dir, "sub-*")) + subject_dirs = glob.glob(op.join(bids_dir, 'sub-*')) all_subjects = sorted([op.basename(subj)[4:] for subj in subject_dirs]) subject_list = opts.participant_label @@ -118,14 +118,14 @@ def main(): else: # remove sub- prefix, get unique for i, subj in enumerate(subject_list): - subject_list[i] = subj[4:] if subj.startswith("sub-") else subj + subject_list[i] = subj[4:] if subj.startswith('sub-') else subj subject_list = sorted(list(set(subject_list))) if list(set(subject_list) - set(all_subjects)): non_exist = list(set(subject_list) - set(all_subjects)) missing_label_error = messages.BIDS_LABEL_MISSING.format( - label=" ".join(non_exist) + label=' '.join(non_exist) ) raise RuntimeError(missing_label_error) @@ -142,20 +142,20 @@ def main(): j = i + gsize groups = [subject_list[i:j] for i in range(0, len(subject_list), gsize)] - log_arg = ">> log/mriqc-{:04d}.log" if opts.log_groups else "" - workdir_arg = " -w work/sjob-{:04d}" if opts.multiple_workdir else "" + log_arg = '>> log/mriqc-{:04d}.log' if opts.log_groups else '' + workdir_arg = ' -w work/sjob-{:04d}' if opts.multiple_workdir else '' for i, part_group in enumerate(groups): kwargs = { - "exec": opts.bids_app_name, - "bids_dir": bids_dir, - "out_dir": opts.output_dir, - "labels": " ".join(part_group), - "work_dir": workdir_arg.format(i), - "arguments": opts.args, - "logfile": log_arg.format(i), + 'exec': opts.bids_app_name, + 'bids_dir': bids_dir, + 'out_dir': opts.output_dir, + 'labels': ' '.join(part_group), + 'work_dir': workdir_arg.format(i), + 'arguments': opts.args, + 'logfile': log_arg.format(i), } print(COMMAND.format(**kwargs)) -if __name__ == "__main__": +if __name__ == '__main__': main() diff --git a/mriqc/cli/parser.py b/mriqc/cli/parser.py index b4e82ec9..1fd15243 100644 --- a/mriqc/cli/parser.py +++ b/mriqc/cli/parser.py @@ -43,8 +43,8 @@ def _parse_participant_labels(value): """ return sorted(set( - re.sub(r"^sub-", "", item.strip()) - for item in re.split(r"\s+", f"{value}".strip()) + re.sub(r'^sub-', '', item.strip()) + for item in re.split(r'\s+', f'{value}'.strip()) )) @@ -62,17 
+62,17 @@ def _build_parser(): class DeprecateAction(Action): def __call__(self, parser, namespace, values, option_string=None): - warnings.warn(f"Argument {option_string} is deprecated and is *ignored*.") + warnings.warn(f'Argument {option_string} is deprecated and is *ignored*.') delattr(namespace, self.dest) class ParticipantLabelAction(Action): def __call__(self, parser, namespace, values, option_string=None): - setattr(namespace, self.dest, _parse_participant_labels(" ".join(values))) + setattr(namespace, self.dest, _parse_participant_labels(' '.join(values))) def _path_exists(path, parser): """Ensure a given path exists.""" if path is None or not Path(path).exists(): - raise parser.error(f"Path does not exist: <{path}>.") + raise parser.error(f'Path does not exist: <{path}>.') return Path(path).expanduser().absolute() def _min_one(value, parser): @@ -83,10 +83,10 @@ def _min_one(value, parser): return value def _to_gb(value): - scale = {"G": 1, "T": 10**3, "M": 1e-3, "K": 1e-6, "B": 1e-9} - digits = "".join([c for c in value if c.isdigit()]) + scale = {'G': 1, 'T': 10**3, 'M': 1e-3, 'K': 1e-6, 'B': 1e-9} + digits = ''.join([c for c in value if c.isdigit()]) n_digits = len(digits) - units = value[n_digits:] or "G" + units = value[n_digits:] or 'G' return int(digits) * scale[units[0]] def _bids_filter(value): @@ -95,7 +95,7 @@ def _bids_filter(value): if value and Path(value).exists(): return loads(Path(value).read_text()) - verstr = f"MRIQC v{config.environment.version}" + verstr = f'MRIQC v{config.environment.version}' currentv = Version(config.environment.version) parser = ArgumentParser( @@ -114,63 +114,63 @@ def _bids_filter(value): # required, positional arguments # IMPORTANT: they must go directly with the parser object parser.add_argument( - "bids_dir", - action="store", + 'bids_dir', + action='store', type=PathExists, - help="The root folder of a BIDS valid dataset (sub-XXXXX folders should " - "be found at the top level in this folder).", + help='The root folder of a BIDS valid dataset (sub-XXXXX folders should ' + 'be found at the top level in this folder).', ) parser.add_argument( - "output_dir", - action="store", + 'output_dir', + action='store', type=Path, - help="The directory where the output files " - "should be stored. If you are running group level analysis " - "this folder should be prepopulated with the results of the " - "participant level analysis.", + help='The directory where the output files ' + 'should be stored. If you are running group level analysis ' + 'this folder should be prepopulated with the results of the ' + 'participant level analysis.', ) parser.add_argument( - "analysis_level", - action="store", - nargs="+", - help="Level of the analysis that will be performed. " - "Multiple participant level analyses can be run independently " - "(in parallel) using the same output_dir.", - choices=["participant", "group"], + 'analysis_level', + action='store', + nargs='+', + help='Level of the analysis that will be performed. 
' + 'Multiple participant level analyses can be run independently ' + '(in parallel) using the same output_dir.', + choices=['participant', 'group'], ) # optional arguments - parser.add_argument("--version", action="version", version=verstr) + parser.add_argument('--version', action='version', version=verstr) parser.add_argument( - "-v", - "--verbose", - dest="verbose_count", - action="count", + '-v', + '--verbose', + dest='verbose_count', + action='count', default=0, - help="Increases log verbosity for each occurrence, debug level is -vvv.", + help='Increases log verbosity for each occurrence, debug level is -vvv.', ) # TODO: add 'mouse', 'macaque', and other populations once the pipeline is working parser.add_argument( - "--species", - action="store", + '--species', + action='store', type=str, - default="human", - choices=["human", "rat"], - help="Use appropriate template for population", + default='human', + choices=['human', 'rat'], + help='Use appropriate template for population', ) - g_bids = parser.add_argument_group("Options for filtering BIDS queries") + g_bids = parser.add_argument_group('Options for filtering BIDS queries') g_bids.add_argument( - "--participant-label", - "--participant_label", - "--participant-labels", - "--participant_labels", - dest="participant_label", + '--participant-label', + '--participant_label', + '--participant-labels', + '--participant_labels', + dest='participant_label', action=ParticipantLabelAction, - nargs="+", - help="A space delimited list of participant identifiers or a single " - "identifier (the sub- prefix can be removed).", + nargs='+', + help='A space delimited list of participant identifiers or a single ' + 'identifier (the sub- prefix can be removed).', ) g_bids.add_argument( '--bids-filter-file', action='store', type=Path, metavar='PATH', @@ -179,57 +179,57 @@ def _bids_filter(value): '(https://github.com/bids-standard/pybids/blob/master/bids/layout/config/bids.json)' ) g_bids.add_argument( - "--session-id", - action="store", - nargs="*", + '--session-id', + action='store', + nargs='*', type=str, - help="Filter input dataset by session ID.", + help='Filter input dataset by session ID.', ) g_bids.add_argument( - "--run-id", - action="store", + '--run-id', + action='store', type=int, - nargs="*", - help="DEPRECATED - This argument will be disabled. Use ``--bids-filter-file`` instead.", + nargs='*', + help='DEPRECATED - This argument will be disabled. 
Use ``--bids-filter-file`` instead.', ) g_bids.add_argument( - "--task-id", - action="store", - nargs="*", + '--task-id', + action='store', + nargs='*', type=str, - help="Filter input dataset by task ID.", + help='Filter input dataset by task ID.', ) g_bids.add_argument( - "-m", - "--modalities", - action="store", + '-m', + '--modalities', + action='store', choices=config.SUPPORTED_SUFFIXES, default=config.SUPPORTED_SUFFIXES, - nargs="*", - help="Filter input dataset by MRI type.", + nargs='*', + help='Filter input dataset by MRI type.', ) - g_bids.add_argument("--dsname", type=str, help="A dataset name.") + g_bids.add_argument('--dsname', type=str, help='A dataset name.') g_bids.add_argument( - "--bids-database-dir", - metavar="PATH", - help="Path to an existing PyBIDS database folder, for faster indexing " - "(especially useful for large datasets).", + '--bids-database-dir', + metavar='PATH', + help='Path to an existing PyBIDS database folder, for faster indexing ' + '(especially useful for large datasets).', ) g_bids.add_argument( - "--bids-database-wipe", - action="store_true", + '--bids-database-wipe', + action='store_true', default=False, - help="Wipe out previously existing BIDS indexing caches, forcing re-indexing.", + help='Wipe out previously existing BIDS indexing caches, forcing re-indexing.', ) # General performance - g_perfm = parser.add_argument_group("Options to handle performance") + g_perfm = parser.add_argument_group('Options to handle performance') g_perfm.add_argument( - "--nprocs", - "--n_procs", - "--n_cpus", - "-n-cpus", - action="store", + '--nprocs', + '--n_procs', + '--n_cpus', + '-n-cpus', + action='store', type=PositiveInt, help="""\ Maximum number of simultaneously running parallel processes executed by *MRIQC* \ @@ -245,9 +245,9 @@ def _bids_filter(value): not be what you want in, e.g., shared systems like a HPC cluster.""", ) g_perfm.add_argument( - "--omp-nthreads", - "--ants-nthreads", - action="store", + '--omp-nthreads', + '--ants-nthreads', + action='store', type=PositiveInt, help="""\ Maximum number of threads that multi-threaded processes executed by *MRIQC* \ @@ -256,174 +256,174 @@ def _bids_filter(value): not be what you want in, e.g., shared systems like a HPC cluster.""", ) g_perfm.add_argument( - "--mem", - "--mem_gb", - "--mem-gb", - dest="memory_gb", - action="store", + '--mem', + '--mem_gb', + '--mem-gb', + dest='memory_gb', + action='store', type=_to_gb, - help="Upper bound memory limit for MRIQC processes.", + help='Upper bound memory limit for MRIQC processes.', ) g_perfm.add_argument( - "--testing", - dest="debug", - action="store_true", + '--testing', + dest='debug', + action='store_true', default=False, - help="Use testing settings for a minimal footprint.", + help='Use testing settings for a minimal footprint.', ) g_perfm.add_argument( - "-f", - "--float32", - action="store_true", + '-f', + '--float32', + action='store_true', default=True, help="Cast the input data to float32 if it's represented in higher precision " "(saves space and improves performance).", ) g_perfm.add_argument( - "--pdb", - dest="pdb", - action="store_true", + '--pdb', + dest='pdb', + action='store_true', default=False, - help="Open Python debugger (pdb) on exceptions.", + help='Open Python debugger (pdb) on exceptions.', ) # Control instruments - g_outputs = parser.add_argument_group("Instrumental options") + g_outputs = parser.add_argument_group('Instrumental options') g_outputs.add_argument( - "-w", - "--work-dir", - action="store", + '-w', + '--work-dir', + 
action='store', type=Path, - default=Path("work").absolute(), - help="Path where intermediate results should be stored.", + default=Path('work').absolute(), + help='Path where intermediate results should be stored.', ) - g_outputs.add_argument("--verbose-reports", default=False, action="store_true") - g_outputs.add_argument("--reports-only", default=False, action="store_true") + g_outputs.add_argument('--verbose-reports', default=False, action='store_true') + g_outputs.add_argument('--reports-only', default=False, action='store_true') g_outputs.add_argument( - "--write-graph", - action="store_true", + '--write-graph', + action='store_true', default=False, - help="Write workflow graph.", + help='Write workflow graph.', ) g_outputs.add_argument( - "--dry-run", - action="store_true", + '--dry-run', + action='store_true', default=False, - help="Do not run the workflow.", + help='Do not run the workflow.', ) g_outputs.add_argument( - "--resource-monitor", - "--profile", - dest="resource_monitor", - action="store_true", + '--resource-monitor', + '--profile', + dest='resource_monitor', + action='store_true', default=False, - help="Hook up the resource profiler callback to nipype.", + help='Hook up the resource profiler callback to nipype.', ) g_outputs.add_argument( - "--use-plugin", - action="store", + '--use-plugin', + action='store', default=None, type=Path, - help="Nipype plugin configuration file.", + help='Nipype plugin configuration file.', ) g_outputs.add_argument( - "--no-sub", + '--no-sub', default=False, - action="store_true", + action='store_true', help="Turn off submission of anonymized quality metrics " "to MRIQC's metrics repository.", ) g_outputs.add_argument( - "--email", - action="store", - default="", + '--email', + action='store', + default='', type=str, - help="Email address to include with quality metric submission.", + help='Email address to include with quality metric submission.', ) g_outputs.add_argument( - "--webapi-url", - action="store", + '--webapi-url', + action='store', type=str, - help="IP address where the MRIQC WebAPI is listening.", + help='IP address where the MRIQC WebAPI is listening.', ) g_outputs.add_argument( - "--webapi-port", - action="store", + '--webapi-port', + action='store', type=int, - help="Port where the MRIQC WebAPI is listening.", + help='Port where the MRIQC WebAPI is listening.', ) g_outputs.add_argument( - "--upload-strict", - action="store_true", + '--upload-strict', + action='store_true', default=False, - help="Upload will fail if upload is strict.", + help='Upload will fail if upload is strict.', ) g_outputs.add_argument( - "--notrack", - action="store_true", - help="Opt-out of sending tracking information of this run to the NiPreps developers. This" - " information helps to improve MRIQC and provides an indicator of real world usage " - " crucial for obtaining funding.", + '--notrack', + action='store_true', + help='Opt-out of sending tracking information of this run to the NiPreps developers. 
This' + ' information helps to improve MRIQC and provides an indicator of real world usage ' + ' crucial for obtaining funding.', ) # ANTs options - g_ants = parser.add_argument_group("Specific settings for ANTs") + g_ants = parser.add_argument_group('Specific settings for ANTs') g_ants.add_argument( - "--ants-float", - action="store_true", + '--ants-float', + action='store_true', default=False, - help="Use float number precision on ANTs computations.", + help='Use float number precision on ANTs computations.', ) g_ants.add_argument( - "--ants-settings", - action="store", - help="Path to JSON file with settings for ANTs.", + '--ants-settings', + action='store', + help='Path to JSON file with settings for ANTs.', ) # Functional workflow settings - g_func = parser.add_argument_group("Functional MRI workflow configuration") + g_func = parser.add_argument_group('Functional MRI workflow configuration') g_func.add_argument( - "--fft-spikes-detector", - action="store_true", + '--fft-spikes-detector', + action='store_true', default=False, - help="Turn on FFT based spike detector (slow).", + help='Turn on FFT based spike detector (slow).', ) g_func.add_argument( - "--fd_thres", - action="store", + '--fd_thres', + action='store', default=0.2, type=float, - help="Threshold on framewise displacement estimates to detect outliers.", + help='Threshold on framewise displacement estimates to detect outliers.', ) g_func.add_argument( - "--deoblique", - action="store_true", + '--deoblique', + action='store_true', default=False, - help="Deoblique the functional scans during head motion correction " - "preprocessing.", + help='Deoblique the functional scans during head motion correction ' + 'preprocessing.', ) g_func.add_argument( - "--despike", - action="store_true", + '--despike', + action='store_true', default=False, - help="Despike the functional scans during head motion correction " - "preprocessing.", + help='Despike the functional scans during head motion correction ' + 'preprocessing.', ) g_func.add_argument( - "--start-idx", + '--start-idx', action=DeprecateAction, type=int, - help="DEPRECATED Initial volume in functional timeseries that should be " - "considered for preprocessing.", + help='DEPRECATED Initial volume in functional timeseries that should be ' + 'considered for preprocessing.', ) g_func.add_argument( - "--stop-idx", + '--stop-idx', action=DeprecateAction, type=int, - help="DEPRECATED Final volume in functional timeseries that should be " - "considered for preprocessing.", + help='DEPRECATED Final volume in functional timeseries that should be ' + 'considered for preprocessing.', ) latest = check_latest() @@ -436,7 +436,7 @@ def _bids_filter(value): _blist = is_flagged() if _blist[0]: - _reason = _blist[1] or "unknown" + _reason = _blist[1] or 'unknown' print( f"""\ WARNING: This version of MRIQC ({config.environment.version}) has been FLAGGED @@ -451,27 +451,28 @@ def _bids_filter(value): def parse_args(args=None, namespace=None): """Parse args and run further checks on the command line.""" - from logging import DEBUG from contextlib import suppress from json import loads + from logging import DEBUG from pprint import pformat + + from niworkflows.utils.bids import DEFAULT_BIDS_QUERIES, collect_data + from mriqc import __version__ from mriqc.messages import PARTICIPANT_START - from niworkflows.utils.bids import collect_data, DEFAULT_BIDS_QUERIES - parser = _build_parser() opts = parser.parse_args(args, namespace) config.execution.log_level = int(max(25 - 5 * opts.verbose_count, DEBUG)) 
config.loggers.init() - extra_messages = [" " * 9 + "-" * 66] + extra_messages = [' ' * 9 + '-' * 66] if opts.bids_filter_file: extra_messages.insert( 0, - f" * BIDS filters-file: {opts.bids_filter_file.absolute()}.", + f' * BIDS filters-file: {opts.bids_filter_file.absolute()}.', ) config.loggers.cli.log( @@ -492,12 +493,12 @@ def parse_args(args=None, namespace=None): with open(opts.use_plugin) as f: plugin_settings = loadyml(f) - _plugin = plugin_settings.get("plugin") + _plugin = plugin_settings.get('plugin') if _plugin: config.nipype.plugin = _plugin - config.nipype.plugin_args = plugin_settings.get("plugin_args", {}) + config.nipype.plugin_args = plugin_settings.get('plugin_args', {}) config.nipype.nprocs = config.nipype.plugin_args.get( - "nprocs", config.nipype.nprocs + 'nprocs', config.nipype.nprocs ) # Load BIDS filters @@ -512,21 +513,21 @@ def parse_args(args=None, namespace=None): # Ensure input and output folders are not the same if output_dir == bids_dir: parser.error( - "The selected output folder is the same as the input BIDS folder. " - "Please modify the output path (suggestion: %s)." + 'The selected output folder is the same as the input BIDS folder. ' + 'Please modify the output path (suggestion: %s).' % bids_dir - / "derivatives" - / ("mriqc-%s" % version.split("+")[0]) + / 'derivatives' + / ('mriqc-%s' % version.split('+')[0]) ) if bids_dir in work_dir.parents: parser.error( - "The selected working directory is a subdirectory of the input BIDS folder. " - "Please modify the output path." + 'The selected working directory is a subdirectory of the input BIDS folder. ' + 'Please modify the output path.' ) # Setup directories - config.execution.log_dir = output_dir / "logs" + config.execution.log_dir = output_dir / 'logs' # Check and create output and working directories config.execution.log_dir.mkdir(exist_ok=True, parents=True) output_dir.mkdir(exist_ok=True, parents=True) @@ -536,7 +537,7 @@ def parse_args(args=None, namespace=None): config.execution.init() participant_label = [ - d.name[4:] for d in config.execution.bids_dir.glob("sub-*") + d.name[4:] for d in config.execution.bids_dir.glob('sub-*') if d.is_dir() and d.exists() ] @@ -554,7 +555,7 @@ def parse_args(args=None, namespace=None): # Handle analysis_level analysis_level = set(config.workflow.analysis_level) if not config.execution.participant_label: - analysis_level.add("group") + analysis_level.add('group') config.workflow.analysis_level = list(analysis_level) # List of files to be run @@ -578,7 +579,7 @@ def parse_args(args=None, namespace=None): # Check the query is not empty if not list(config.workflow.inputs.values()): ffile = ( - "(--bids-filter-file was not set)" if not opts.bids_filter_file + '(--bids-filter-file was not set)' if not opts.bids_filter_file else f"(with '--bids-filter-file {opts.bids_filter_file}')" ) parser.error( @@ -594,7 +595,7 @@ def parse_args(args=None, namespace=None): ) if unknown_mods: parser.error( - "MRIQC is unable to process the following modalities: " + 'MRIQC is unable to process the following modalities: ' f'{", ".join(unknown_mods)}.' 
) @@ -605,11 +606,11 @@ def parse_args(args=None, namespace=None): ) # set specifics for alternative populations - if opts.species.lower() != "human": + if opts.species.lower() != 'human': config.workflow.species = opts.species # TODO: add other species once rats are working - if opts.species.lower() == "rat": - config.workflow.template_id = "Fischer344" + if opts.species.lower() == 'rat': + config.workflow.template_id = 'Fischer344' # mean distance from the lateral edge to the center of the brain is # ~ PA:10 mm, LR:7.5 mm, and IS:5 mm (see DOI: 10.1089/089771503770802853) # roll movement is most likely to occur, so set to 7.5 mm diff --git a/mriqc/cli/run.py b/mriqc/cli/run.py index 5c094420..af359826 100644 --- a/mriqc/cli/run.py +++ b/mriqc/cli/run.py @@ -25,11 +25,12 @@ def main(): """Entry point for MRIQC's CLI.""" + import atexit import gc import os import sys from tempfile import mktemp - import atexit + from mriqc import config, messages from mriqc.cli.parser import parse_args @@ -42,7 +43,7 @@ def main(): from mriqc.utils.debug import setup_exceptionhook setup_exceptionhook() - config.nipype.plugin = "Linear" + config.nipype.plugin = 'Linear' # CRITICAL Save the config to a file. This is necessary because the execution graph # is built as a separate process to keep the memory footprint low. The most @@ -50,24 +51,24 @@ def main(): # The config file name needs to be unique, otherwise multiple mriqc instances # will create write conflicts. config_file = mktemp( - dir=config.execution.work_dir, prefix=".mriqc.", suffix=".toml" + dir=config.execution.work_dir, prefix='.mriqc.', suffix='.toml' ) config.to_filename(config_file) config.file_path = config_file exitcode = 0 # Set up participant level - if "participant" in config.workflow.analysis_level: + if 'participant' in config.workflow.analysis_level: _pool = None - if config.nipype.plugin in ("MultiProc", "LegacyMultiProc"): - from contextlib import suppress + if config.nipype.plugin in ('MultiProc', 'LegacyMultiProc'): import multiprocessing as mp import multiprocessing.forkserver from concurrent.futures import ProcessPoolExecutor + from contextlib import suppress - os.environ["OMP_NUM_THREADS"] = "1" + os.environ['OMP_NUM_THREADS'] = '1' with suppress(RuntimeError): - mp.set_start_method("fork") + mp.set_start_method('fork') gc.collect() _pool = ProcessPoolExecutor( @@ -83,7 +84,7 @@ def main(): _resmon = ResourceRecorder( pid=os.getpid(), log_file=mktemp( - dir=config.execution.work_dir, prefix=".resources.", suffix=".tsv" + dir=config.execution.work_dir, prefix='.resources.', suffix='.tsv' ), ) _resmon.start() @@ -106,8 +107,8 @@ def main(): p.start() p.join() - mriqc_wf = retval.get("workflow", None) - exitcode = p.exitcode or retval.get("return_code", 0) + mriqc_wf = retval.get('workflow', None) + exitcode = p.exitcode or retval.get('return_code', 0) # CRITICAL Load the config from the file. This is necessary because the ``build_workflow`` # function executed constrained in a process may change the config (and thus the global @@ -125,16 +126,16 @@ def main(): if _resmon: config.loggers.cli.info( - f"Started resource recording at {_resmon._logfile}." + f'Started resource recording at {_resmon._logfile}.' 
diff --git a/mriqc/cli/version.py b/mriqc/cli/version.py
index 03f40ba4..9b202aa0 100644
--- a/mriqc/cli/version.py
+++ b/mriqc/cli/version.py
@@ -25,10 +25,11 @@
 from pathlib import Path
 
 import requests
+
 from mriqc import __version__
 
 RELEASE_EXPIRY_DAYS = 14
-DATE_FMT = "%Y%m%d"
+DATE_FMT = '%Y%m%d'
 
 
 def check_latest():
@@ -38,7 +39,7 @@ def check_latest():
     latest = None
     date = None
     outdated = None
-    cachefile = Path.home() / ".cache" / "mriqc" / "latest"
+    cachefile = Path.home() / '.cache' / 'mriqc' / 'latest'
     try:
         cachefile.parent.mkdir(parents=True, exist_ok=True)
     except OSError:
@@ -46,7 +47,7 @@ def check_latest():
 
     if cachefile and cachefile.exists():
         try:
-            latest, date = cachefile.read_text().split("|")
+            latest, date = cachefile.read_text().split('|')
         except Exception:
             pass
         else:
@@ -61,12 +62,12 @@ def check_latest():
 
     if latest is None or outdated is True:
         try:
-            response = requests.get(url="https://pypi.org/pypi/mriqc/json", timeout=1.0)
+            response = requests.get(url='https://pypi.org/pypi/mriqc/json', timeout=1.0)
         except Exception:
             response = None
 
         if response and response.status_code == 200:
-            versions = [Version(rel) for rel in response.json()["releases"].keys()]
+            versions = [Version(rel) for rel in response.json()['releases'].keys()]
             versions = [rel for rel in versions if not rel.is_prerelease]
             if versions:
                 latest = sorted(versions)[-1]
@@ -76,7 +77,7 @@ def check_latest():
     if cachefile is not None and latest is not None:
         try:
             cachefile.write_text(
-                "|".join(("%s" % latest, datetime.now().strftime(DATE_FMT)))
+                '|'.join(('%s' % latest, datetime.now().strftime(DATE_FMT)))
             )
         except Exception:
             pass
@@ -98,7 +99,7 @@ def is_flagged():
         response = None
 
     if response and response.status_code == 200:
-        flagged = response.json().get("flagged", {}) or {}
+        flagged = response.json().get('flagged', {}) or {}
         if __version__ in flagged:
             return True, flagged[__version__]
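For reference, check_latest() above persists the PyPI answer as a single 'version|YYYYMMDD' line under ~/.cache/mriqc/latest and re-queries once the stamp is older than RELEASE_EXPIRY_DAYS. A round-trip sketch of that cache format (the version string is illustrative only):

    from datetime import datetime
    from pathlib import Path

    DATE_FMT = '%Y%m%d'
    RELEASE_EXPIRY_DAYS = 14
    cachefile = Path.home() / '.cache' / 'mriqc' / 'latest'

    # Write: version and query date, '|'-separated, as check_latest() does.
    cachefile.parent.mkdir(parents=True, exist_ok=True)
    cachefile.write_text('|'.join(('24.0.0', datetime.now().strftime(DATE_FMT))))

    # Read back and decide whether the cached answer is stale.
    latest, date = cachefile.read_text().split('|')
    outdated = (datetime.now() - datetime.strptime(date, DATE_FMT)).days > RELEASE_EXPIRY_DAYS
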
diff --git a/mriqc/cli/workflow.py b/mriqc/cli/workflow.py
index 7956f530..a58f8374 100644
--- a/mriqc/cli/workflow.py
+++ b/mriqc/cli/workflow.py
@@ -34,9 +34,10 @@
 def build_workflow(config_file, retval):
     """Create the Nipype Workflow that supports the whole execution graph."""
     import os
+
     from mriqc import config
 
     # We do not need OMP > 1 for workflow creation
-    os.environ["OMP_NUM_THREADS"] = "1"
+    os.environ['OMP_NUM_THREADS'] = '1'
 
     from mriqc.workflows.core import init_mriqc_wf
 
@@ -46,12 +47,12 @@ def build_workflow(config_file, retval):
     # Make sure loggers are started
     config.loggers.init()
 
-    retval["return_code"] = 1
-    retval["workflow"] = None
+    retval['return_code'] = 1
+    retval['workflow'] = None
 
     config.loggers.cli.log(25, "Building MRIQC's workflows...")
-    retval["workflow"] = init_mriqc_wf()
-    retval["return_code"] = int(retval["workflow"] is None)
+    retval['workflow'] = init_mriqc_wf()
+    retval['return_code'] = int(retval['workflow'] is None)
 
     config.loggers.cli.log(
         25,
         f"Workflow building finished (exit code {retval['return_code']})."
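build_workflow() communicates with the parent process only through the retval mapping (the 'workflow' and 'return_code' keys), which is what run.py earlier in this patch reads back after p.join(). A minimal sketch of that handshake (simplified: paths are illustrative, and the real child calls build_workflow with a manager-backed dict):

    import multiprocessing as mp

    def _child(config_file, retval):
        # Stand-in for build_workflow(): report results through the mapping.
        retval['return_code'] = 0
        retval['workflow'] = None  # a real Nipype workflow in MRIQC

    if __name__ == '__main__':
        with mp.Manager() as mgr:
            retval = mgr.dict()
            p = mp.Process(target=_child, args=('.mriqc.config.toml', retval))
            p.start()
            p.join()
            exitcode = p.exitcode or retval.get('return_code', 0)
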
"dwi") +SUPPORTED_SUFFIXES = ('T1w', 'T2w', 'bold', 'dwi') DEFAULT_MEMORY_MIN_GB = 0.01 DSA_MESSAGE = """\ @@ -143,18 +143,18 @@ _exec_env = os.name _docker_ver = None # special variable set in the container -if os.getenv("IS_DOCKER_8395080871"): - _exec_env = "singularity" - _cgroup = Path("/proc/1/cgroup") - if _cgroup.exists() and "docker" in _cgroup.read_text(): - _docker_ver = os.getenv("DOCKER_VERSION_8395080871") - _exec_env = "docker" +if os.getenv('IS_DOCKER_8395080871'): + _exec_env = 'singularity' + _cgroup = Path('/proc/1/cgroup') + if _cgroup.exists() and 'docker' in _cgroup.read_text(): + _docker_ver = os.getenv('DOCKER_VERSION_8395080871') + _exec_env = 'docker' del _cgroup _templateflow_home = Path( os.getenv( - "TEMPLATEFLOW_HOME", - os.path.join(os.getenv("HOME"), ".cache", "templateflow"), + 'TEMPLATEFLOW_HOME', + os.path.join(os.getenv('HOME'), '.cache', 'templateflow'), ) ) @@ -165,39 +165,39 @@ except Exception: _free_mem_at_start = None -_oc_limit = "n/a" -_oc_policy = "n/a" +_oc_limit = 'n/a' +_oc_policy = 'n/a' try: # Memory policy may have a large effect on types of errors experienced - _proc_oc_path = Path("/proc/sys/vm/overcommit_memory") + _proc_oc_path = Path('/proc/sys/vm/overcommit_memory') if _proc_oc_path.exists(): - _oc_policy = {"0": "heuristic", "1": "always", "2": "never"}.get( - _proc_oc_path.read_text().strip(), "unknown" + _oc_policy = {'0': 'heuristic', '1': 'always', '2': 'never'}.get( + _proc_oc_path.read_text().strip(), 'unknown' ) - if _oc_policy != "never": - _proc_oc_kbytes = Path("/proc/sys/vm/overcommit_kbytes") + if _oc_policy != 'never': + _proc_oc_kbytes = Path('/proc/sys/vm/overcommit_kbytes') if _proc_oc_kbytes.exists(): _oc_limit = _proc_oc_kbytes.read_text().strip() if ( - _oc_limit in ("0", "n/a") - and Path("/proc/sys/vm/overcommit_ratio").exists() + _oc_limit in ('0', 'n/a') + and Path('/proc/sys/vm/overcommit_ratio').exists() ): - _oc_limit = "{}%".format( - Path("/proc/sys/vm/overcommit_ratio").read_text().strip() + _oc_limit = '{}%'.format( + Path('/proc/sys/vm/overcommit_ratio').read_text().strip() ) except Exception: pass _memory_gb = None try: - if "linux" in sys.platform: - with open("/proc/meminfo", "r") as f_in: + if 'linux' in sys.platform: + with open('/proc/meminfo') as f_in: _meminfo_lines = f_in.readlines() - _mem_total_line = [line for line in _meminfo_lines if "MemTotal" in line][0] + _mem_total_line = [line for line in _meminfo_lines if 'MemTotal' in line][0] _mem_total = float(_mem_total_line.split()[1]) _memory_gb = _mem_total / (1024.0**2) - elif "darwin" in sys.platform: - _mem_str = os.popen("sysctl hw.memsize").read().strip().split(" ")[-1] + elif 'darwin' in sys.platform: + _mem_str = os.popen('sysctl hw.memsize').read().strip().split(' ')[-1] _memory_gb = float(_mem_str) / (1024.0**3) except Exception: pass @@ -215,7 +215,7 @@ class _Config: def __init__(self): """Avert instantiation.""" - raise RuntimeError("Configuration type is not instantiable.") + raise RuntimeError('Configuration type is not instantiable.') @classmethod def load(cls, settings, init=True): @@ -240,7 +240,7 @@ def get(cls): """Return defined settings.""" out = {} for k, v in cls.__dict__.items(): - if k.startswith("_") or v is None: + if k.startswith('_') or v is None: continue if callable(getattr(cls, k)): continue @@ -276,9 +276,9 @@ class environment(_Config): """Linux's kernel virtual memory overcommit policy.""" overcommit_limit = _oc_limit """Linux's kernel virtual memory overcommit limits.""" - nipype_version = 
get_version("nipype") + nipype_version = get_version('nipype') """Nipype's current version.""" - templateflow_version = get_version("templateflow") + templateflow_version = get_version('templateflow') """The TemplateFlow client version installed.""" total_memory = _memory_gb """Total memory available, in GB.""" @@ -291,7 +291,7 @@ class environment(_Config): class nipype(_Config): """Nipype settings.""" - crashfile_format = "txt" + crashfile_format = 'txt' """The file format for crashfiles, either text or pickle.""" get_linked_libs = False """Run NiPype's tool to enlist linked libraries for every interface.""" @@ -303,11 +303,11 @@ class nipype(_Config): """Number of processes (compute tasks) that can be run in parallel (multiprocessing only).""" omp_nthreads = _default_omp_threads """Number of CPUs a single process can access for multithreaded execution.""" - plugin = "MultiProc" + plugin = 'MultiProc' """NiPype's execution plugin.""" plugin_args = { - "maxtasksperchild": 1, - "raise_insufficient": False, + 'maxtasksperchild': 1, + 'raise_insufficient': False, } """Settings for NiPype's execution plugin.""" remove_node_directories = False @@ -321,13 +321,13 @@ class nipype(_Config): def get_plugin(cls): """Format a dictionary for Nipype consumption.""" out = { - "plugin": cls.plugin, - "plugin_args": cls.plugin_args, + 'plugin': cls.plugin, + 'plugin_args': cls.plugin_args, } - if cls.plugin in ("MultiProc", "LegacyMultiProc"): - out["plugin_args"]["n_procs"] = int(cls.nprocs) + if cls.plugin in ('MultiProc', 'LegacyMultiProc'): + out['plugin_args']['n_procs'] = int(cls.nprocs) if cls.memory_gb: - out["plugin_args"]["memory_gb"] = float(cls.memory_gb) + out['plugin_args']['memory_gb'] = float(cls.memory_gb) return out @classmethod @@ -338,11 +338,11 @@ def init(cls): # Nipype config (logs and execution) ncfg.update_config( { - "execution": { - "crashdump_dir": str(execution.log_dir), - "crashfile_format": cls.crashfile_format, - "get_linked_libs": cls.get_linked_libs, - "stop_on_first_crash": cls.stop_on_first_crash, + 'execution': { + 'crashdump_dir': str(execution.log_dir), + 'crashfile_format': cls.crashfile_format, + 'get_linked_libs': cls.get_linked_libs, + 'stop_on_first_crash': cls.stop_on_first_crash, } } ) @@ -369,7 +369,7 @@ class execution(_Config): """Run in sloppy mode (meaning, suboptimal parameters that minimize run-time).""" dry_run = False """Just test, do not run.""" - dsname = "" + dsname = '' """A dataset name used when generating files from the rating widget.""" echo_id = None """Select a particular echo for multi-echo EPI datasets.""" @@ -399,7 +399,7 @@ class execution(_Config): """Enable resource monitor.""" run_id = None """Filter input dataset by run identifier.""" - run_uuid = "{}_{}".format(strftime("%Y%m%d-%H%M%S"), uuid4()) + run_uuid = '{}_{}'.format(strftime('%Y%m%d-%H%M%S'), uuid4()) """Unique identifier of this particular run.""" session_id = None """Filter input dataset by session identifier.""" @@ -411,11 +411,11 @@ class execution(_Config): """Workflow will crash if upload is not successful.""" verbose_reports = False """Generate extended reports.""" - webapi_token = "" + webapi_token = '' """Authorization token for the WebAPI service.""" - webapi_url = "https://mriqc.nimh.nih.gov:443/api/v1" + webapi_url = 'https://mriqc.nimh.nih.gov:443/api/v1' """IP address where the MRIQC WebAPI is listening.""" - work_dir = Path("work").absolute() + work_dir = Path('work').absolute() """Path to a working directory where intermediate results will be available.""" 
@@ -423,16 +423,16 @@
 
     _layout = None
     _paths = (
-        "anat_derivatives",
-        "bids_dir",
-        "bids_database_dir",
-        "fs_license_file",
-        "fs_subjects_dir",
-        "layout",
-        "log_dir",
-        "output_dir",
-        "templateflow_home",
-        "work_dir",
+        'anat_derivatives',
+        'bids_dir',
+        'bids_database_dir',
+        'fs_license_file',
+        'fs_subjects_dir',
+        'layout',
+        'log_dir',
+        'output_dir',
+        'templateflow_home',
+        'work_dir',
     )
 
     @classmethod
@@ -445,31 +445,32 @@ def init(cls):
         # Process --run-id if the argument was provided
         if cls.run_id:
             for mod in cls.modalities:
-                cls.bids_filters.setdefault(mod.lower(), {})["run"] = cls.run_id
+                cls.bids_filters.setdefault(mod.lower(), {})['run'] = cls.run_id
 
         if cls._layout is None:
             import re
 
-            from bids.layout.index import BIDSLayoutIndexer
+            from bids.layout import BIDSLayout
+            from bids.layout.index import BIDSLayoutIndexer
 
             ignore_paths = [
                 # Ignore folders at the top if they don't start with /sub-