diff --git a/docs/about.rst b/docs/about.rst index b07bb8b97..6dee284b0 100644 --- a/docs/about.rst +++ b/docs/about.rst @@ -1,5 +1,5 @@ =================== -Background +Background =================== Motivation @@ -19,7 +19,7 @@ their computational time and resources** effectively. ``CuBIDS`` is designed to facilitate the curation of large, neuroimaging data so that users can infer useful information from descriptive and accurate BIDS labels -before running pipelines *en masse*. ``CuBIDS`` accomplishes this by summarizing +before running pipelines *en masse*. ``CuBIDS`` accomplishes this by summarizing BIDS data using :ref:`keygroup`, :ref:`paramgroup`, and :ref:`acquisitiongroup` categorizations in your data (we'll explain what these are in more detail in the next section). @@ -40,7 +40,7 @@ Definitions * A set of scans whose filenames share all `BIDS filename key-value pairs `_, excluding subject and session * Derived from the BIDS Filename - * Example structure: ``acquisition-*_datatype-*_run-*_task-*_suffix`` + * Example structure: ``acquisition-*_datatype-*_run-*_task-*_suffix`` .. topic:: Parameter (Param) Group @@ -53,15 +53,15 @@ Definitions * The Param Group that contains the most scans in its Key Group .. topic:: Variant Group - + * Any Param Group that is non-dominant .. topic:: Rename Key Group - * Auto-generated, recommended new Key Group name for Variant Groups - * Based on the metadata parameters that cause scans in Variant Groups to vary from those in their respective Dominant Groups + * Auto-generated, recommended new Key Group name for Variant Groups + * Based on the metadata parameters that cause scans in Variant Groups to vary from those in their respective Dominant Groups -.. topic:: Acquisition Group +.. topic:: Acquisition Group * A collection of sessions across participants that contains the exact same set of Key and Param Groups @@ -85,4 +85,4 @@ In the next section, we'll discuss these definitions in more detail and demonstr .. [#f1] See the `BIDS Specification `_. .. [#f2] See this list of amazing `BIDS apps `_. -.. [#f3] See `DataLad `_. \ No newline at end of file +.. [#f3] See `DataLad `_. diff --git a/docs/authors.rst b/docs/authors.rst deleted file mode 100644 index e122f914a..000000000 --- a/docs/authors.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../AUTHORS.rst diff --git a/docs/conf.py b/docs/conf.py index 218cd3531..6b1bae97c 100755 --- a/docs/conf.py +++ b/docs/conf.py @@ -16,60 +16,61 @@ # directory, add these directories to sys.path here. If the directory is # relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. -# import os import sys -sys.path.insert(0, os.path.abspath('..')) -from sphinx import __version__ as sphinxversion + +sys.path.insert(0, os.path.abspath("..")) + import cubids -from packaging import version as pver # Avoid distutils.LooseVersion which is deprecated # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.append(os.path.abspath('sphinxext')) -sys.path.insert(0, os.path.abspath('../wrapper')) +sys.path.append(os.path.abspath("sphinxext")) +sys.path.insert(0, os.path.abspath("../wrapper")) # -- General configuration --------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. 
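To make the Key Group definition above concrete, here is a minimal sketch of how such a name could be assembled from a BIDS filename using ``parse_file_entities`` from pybids (the ``filename_to_key_group`` helper is hypothetical and only illustrates the definition; it is not the function CuBIDS itself uses)::

    from bids.layout import parse_file_entities

    def filename_to_key_group(filename):
        """Build a Key Group label from a BIDS path (illustrative only)."""
        entities = parse_file_entities(str(filename))
        # Subject and session are excluded from Key Groups by definition;
        # the file extension is also dropped to match the example structure.
        for excluded in ("subject", "session", "extension"):
            entities.pop(excluded, None)
        # Join the remaining key-value pairs in sorted order, BIDS-style.
        return "_".join(f"{key}-{value}" for key, value in sorted(entities.items()))

    key_group = filename_to_key_group(
        "sub-01/ses-phdiff/dwi/sub-01_ses-phdiff_acq-HASC55AP_dwi.nii.gz"
    )
    # Expected form: something like "acquisition-HASC55AP_datatype-dwi_suffix-dwi"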
# -needs_sphinx = '1.5.3' +needs_sphinx = "1.5.3" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.doctest', - 'sphinx.ext.intersphinx', - 'sphinx.ext.coverage', - 'sphinx.ext.mathjax', - 'sphinxarg.ext', # argparse extension - 'sphinx.ext.viewcode' + "nbsphinx", + "sphinx.ext.autodoc", + "sphinx.ext.doctest", + "sphinx.ext.intersphinx", + "sphinx.ext.coverage", + "sphinx.ext.mathjax", + "sphinxarg.ext", # argparse extension + "sphinx.ext.viewcode", + "sphinx_gallery.load_style", ] # Mock modules in autodoc: autodoc_mock_imports = [ - 'numpy', - 'nitime', - 'matplotlib', + "numpy", + "nitime", + "matplotlib", ] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = 'CuBIDS' +project = "CuBIDS" copyright = "2020, PennLINC" author = "PennLINC" @@ -87,15 +88,15 @@ # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = 'en' +language = "en" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False @@ -106,8 +107,10 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'sphinx_rtd_theme' -html_theme_path = ["_themes", ] +html_theme = "sphinx_rtd_theme" +html_theme_path = [ + "_themes", +] # Theme options are theme-specific and customize the look and feel of a # theme further. For a list of options available for each theme, see the @@ -118,13 +121,13 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # -- Options for HTMLHelp output --------------------------------------- # Output file base name for HTML help builder. -htmlhelp_basename = 'cubidsdoc' +htmlhelp_basename = "cubidsdoc" # -- Options for LaTeX output ------------------------------------------ @@ -133,15 +136,12 @@ # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -151,9 +151,7 @@ # (source start file, target name, title, author, documentclass # [howto, manual, or own class]). 
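The ``nbsphinx`` and ``sphinx_gallery.load_style`` extensions added above are what turn the relocated notebooks into a rendered thumbnail gallery: nbsphinx converts each ``.ipynb`` under ``docs/notebooks/`` into an HTML page, while ``sphinx_gallery.load_style`` only supplies the CSS used by the ``nbgallery`` directive in the new ``examples.rst``. A minimal, illustrative fragment (the ``nbsphinx_execute`` setting is an assumption for this sketch, not part of this change)::

    # docs/conf.py (illustrative fragment)
    extensions = [
        "nbsphinx",                   # render Jupyter notebooks as doc pages
        "sphinx_gallery.load_style",  # CSS for the nbgallery thumbnail grid
    ]
    # Render stored notebook outputs instead of re-executing them at build time.
    nbsphinx_execute = "never"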
latex_documents = [ - (master_doc, 'cubids.tex', - 'CuBIDS Documentation', - 'PennLINC', 'manual'), + (master_doc, "cubids.tex", "CuBIDS Documentation", "PennLINC", "manual"), ] @@ -161,11 +159,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'cubids', - 'CuBIDS Documentation', - [author], 1) -] +man_pages = [(master_doc, "cubids", "CuBIDS Documentation", [author], 1)] # -- Options for Texinfo output ---------------------------------------- @@ -174,14 +168,16 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'cubids', - 'CuBIDS Documentation', - author, - 'cubids', - 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "cubids", + "CuBIDS Documentation", + author, + "cubids", + "One line description of project.", + "Miscellaneous", + ), ] # -- Fix automodule config add_module_names = False - diff --git a/docs/contributing.rst b/docs/contributing.rst deleted file mode 100644 index e582053ea..000000000 --- a/docs/contributing.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../CONTRIBUTING.rst diff --git a/docs/examples.rst b/docs/examples.rst new file mode 100644 index 000000000..a9ed3a6ae --- /dev/null +++ b/docs/examples.rst @@ -0,0 +1,14 @@ +Thumbnails gallery +================== + +.. nbgallery:: + notebooks/Fieldmaps + notebooks/FirstProofofConcept + notebooks/HTML_param_groups + notebooks/JSON_PoC_read_write + notebooks/Key_and_Param_Groups + notebooks/keyparamgrouptest + notebooks/metadata_image_param + notebooks/PofC_Key_Values2 + notebooks/rename_files_work + notebooks/workwithtestdata diff --git a/docs/history.rst b/docs/history.rst deleted file mode 100644 index 250649964..000000000 --- a/docs/history.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../HISTORY.rst diff --git a/docs/index.rst b/docs/index.rst index d2daba54f..a492452e6 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -12,6 +12,7 @@ Contents usage installation example - contributing - authors - history + examples + ../CONTRIBUTING + ../AUTHORS + ../HISTORY diff --git a/notebooks/Fieldmaps.ipynb b/docs/notebooks/Fieldmaps.ipynb similarity index 94% rename from notebooks/Fieldmaps.ipynb rename to docs/notebooks/Fieldmaps.ipynb index 0c14d5289..13ab18290 100644 --- a/notebooks/Fieldmaps.ipynb +++ b/docs/notebooks/Fieldmaps.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Fieldmaps OK?\n", + "# Check If Field Maps Are Defined For a Dataset\n", "\n", "This notebook shows how we check if fieldmaps are defined for the data set. There are two approaches:\n", "\n", @@ -29,19 +29,19 @@ "metadata": {}, "outputs": [], "source": [ - "# USE THIS BEFORE TESTING! 
\n", - "import sys \n", + "# USE THIS BEFORE TESTING!\n", + "import sys\n", "sys.path.append(\"..\")\n", - "from pathlib import Path \n", + "from pathlib import Path\n", "import shutil\n", "import os\n", "\n", - "from pkg_resources import resource_filename as pkgrf \n", + "from pkg_resources import resource_filename as pkgrf\n", "\n", "# returns string path to testdata\n", "TEST_DATA = pkgrf(\"cubids\", \"testdata\")\n", "\n", - "# should give you the full path \n", + "# should give you the full path\n", "tmp_path = Path().resolve()\n", "#print(tmp_path)\n", "\n", @@ -100,10 +100,10 @@ "import json\n", "\n", "def read_intendedfor(path):\n", - " \n", + "\n", " with open(str(path), 'r') as infile:\n", " data = json.load(infile)\n", - " \n", + "\n", " return data.get('IntendedFor')" ] }, @@ -143,9 +143,9 @@ "mapping = {}\n", "\n", "for fm in fmaps:\n", - " \n", + "\n", " intfor = read_intendedfor(fm)\n", - " \n", + "\n", " mapping[str(fm)] = intfor" ] }, @@ -184,21 +184,21 @@ "all_files = [str(x) for x in pathlib.Path(data_root).rglob(\"*.nii*\")]\n", "\n", "for k, v in mapping.items():\n", - " \n", + "\n", " if not v:\n", - " \n", + "\n", " print(\"{}: This fieldmap is not intended for any files!\".format(k))\n", - " \n", + "\n", " continue\n", - " \n", + "\n", " for fi in v:\n", - " \n", + "\n", " if any([fi in x for x in all_files]):\n", - " \n", + "\n", " print(\"{}: This fieldmap has a file\".format(k))\n", - " \n", + "\n", " else:\n", - " \n", + "\n", " print(\"{}: The file this fieldmap is intended for doesn't exist\".format(k))" ] }, diff --git a/notebooks/FirstProofofConcept.ipynb b/docs/notebooks/FirstProofofConcept.ipynb similarity index 100% rename from notebooks/FirstProofofConcept.ipynb rename to docs/notebooks/FirstProofofConcept.ipynb diff --git a/notebooks/HTML_param_groups.ipynb b/docs/notebooks/HTML_param_groups.ipynb similarity index 94% rename from notebooks/HTML_param_groups.ipynb rename to docs/notebooks/HTML_param_groups.ipynb index f04a82b10..e9854a736 100644 --- a/notebooks/HTML_param_groups.ipynb +++ b/docs/notebooks/HTML_param_groups.ipynb @@ -1,12 +1,19 @@ { "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# HTML Param Groups" + ] + }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ - "import sys \n", + "import sys\n", "sys.path.append(\"..\")" ] }, @@ -27,20 +34,20 @@ } ], "source": [ - "# USE THIS BEFORE TESTING! \n", + "# USE THIS BEFORE TESTING!\n", "\n", - "from pathlib import Path \n", + "from pathlib import Path\n", "import shutil\n", "import os\n", "#import cubids\n", - "from bids.layout import parse_file_entities \n", + "from bids.layout import parse_file_entities\n", "from cubids import CuBIDS\n", - "from pkg_resources import resource_filename as pkgrf \n", + "from pkg_resources import resource_filename as pkgrf\n", "\n", "# returns string path to testdata\n", "TEST_DATA = pkgrf(\"cubids\", \"testdata\")\n", "\n", - "# should give you the full path \n", + "# should give you the full path\n", "tmp_path = Path().resolve()\n", "#print(tmp_path)\n", "\n", @@ -235,7 +242,7 @@ "\n", "\n", "\n", - "# ISSUE! Grouping by char! \n", + "# ISSUE! 
Grouping by char!\n", "\n", "#param_group = cubids_obj.get_param_groups(key_group)\n", "# print(key_group)\n", @@ -254,14 +261,14 @@ "\n", "def file_to_entities(filename):\n", " entities = parse_file_entities(str(filename))\n", - " return entities \n", + " return entities\n", "\n", - "def file_to_key_group(filename): \n", + "def file_to_key_group(filename):\n", " entities = parse_file_entities(str(filename))\n", " keys = entities_to_key_group(entities)\n", " return keys\n", "\n", - "def key_group_to_entities(key_group): \n", + "def key_group_to_entities(key_group):\n", " return dict([group.split(\"-\") for group in key_group.split(\"_\")])\n", "\n", "def get_file_params(files):\n", @@ -276,14 +283,14 @@ " Returns:\n", " --------\n", "\n", - " files_params : dictionary \n", + " files_params : dictionary\n", " A dictionary of filename, param_dict pairs\n", "\n", " For each file in `files`, find critical parameters for metadata. Then find\n", " unique sets of these critical parameters.\n", " \"\"\"\n", "\n", - " # # DICTIONARY OF FILENAME, DICT_PARAMS \n", + " # # DICTIONARY OF FILENAME, DICT_PARAMS\n", " files_params = {}\n", " for path in files:\n", " metadata = self.layout.get_metadata(path)\n", @@ -301,11 +308,11 @@ " SliceNum, time in enumerate(SliceTime)})\n", " del example_data['SliceTiming']\n", "\n", - " # ADD TO THE DICTIONARY \n", - " files_params[path] = example_data \n", + " # ADD TO THE DICTIONARY\n", + " files_params[path] = example_data\n", "\n", " return files_params\n", - " \n", + "\n", "\n", "#def get_param_groups(key_group, path):\n", "# key_entities = key_group_to_entities(key_group)\n", @@ -319,8 +326,8 @@ "filename = \"/Users/Covitz/CuBIDS/cubids/testdata/complete/sub-01/ses-phdiff/fmap/sub-02_ses-phdiff_acq-v4_magnitude1.json\"\n", "path = \"/Users/Covitz/CuBIDS/cubids/testdata/complete/\"\n", "ret_entities = file_to_entities(filename)\n", - "print(ret_entities) \n", - "key_group = entities_to_key_group(ret_entities) \n", + "print(ret_entities)\n", + "key_group = entities_to_key_group(ret_entities)\n", "print(key_group)\n", "\n", "entities = key_group_to_entities(key_group)\n", @@ -360,8 +367,8 @@ "source": [ "\n", "\n", - "# IMPORT SET TRACE \n", - "# assert 0, debug \n", + "# IMPORT SET TRACE\n", + "# assert 0, debug\n", "\n", "\n", "\n", @@ -408,15 +415,15 @@ "metadata": {}, "outputs": [], "source": [ - "import pathlib \n", + "import pathlib\n", "\n", "# @Params\n", - "# - path: a string containing the path to the bids directory inside which we want to change files \n", + "# - path: a string containing the path to the bids directory inside which we want to change files\n", "# @Returns\n", - "# - HTML report of acquisitions and their parameter groups \n", + "# - HTML report of acquisitions and their parameter groups\n", "\n", "\n", - "# WHERE DO WE FIND THE ACQUISITION TYPE? 
\n", + "# WHERE DO WE FIND THE ACQUISITION TYPE?\n", "\n", "\n", "\n", @@ -434,23 +441,23 @@ "\n", "def html_groups(bids_dir):\n", " # get key groups using cubids.get_key_groups\n", - " # use key_group_to_entities to get entities \n", - " # get param groups for each entity \n", - " \n", - " \n", - " # initialize dictionary of acquisition types \n", + " # use key_group_to_entities to get entities\n", + " # get param groups for each entity\n", + "\n", + "\n", + " # initialize dictionary of acquisition types\n", " d_acts = {}\n", " for path in pathlib.Path(path_to_dir).iterdir():\n", " if path.is_file():\n", " ext = path.suffix\n", - " # check if the file is a .json file \n", + " # check if the file is a .json file\n", " if ext == \".json\":\n", " # parse keys\n", " d_keys = parse_file_entities(path)\n", - " \n", - " \n", - " \n", - " # create html file \n" + "\n", + "\n", + "\n", + " # create html file\n" ] }, { diff --git a/notebooks/JSON_PoC_read_write.ipynb b/docs/notebooks/JSON_PoC_read_write.ipynb similarity index 98% rename from notebooks/JSON_PoC_read_write.ipynb rename to docs/notebooks/JSON_PoC_read_write.ipynb index 6e57b6c4b..db1526811 100644 --- a/notebooks/JSON_PoC_read_write.ipynb +++ b/docs/notebooks/JSON_PoC_read_write.ipynb @@ -4,6 +4,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ + "# JSON Proof of Concept\n", + "\n", "In this proof of concept we will read & write JSON files in Jupyter notebook. \n", "\n", "1. display the data in the sidecar \n", @@ -17,11 +19,11 @@ "metadata": {}, "outputs": [], "source": [ - "#import json module to be able to read & write json files \n", + "#import json module to be able to read & write json files\n", "import json\n", "import pandas as pd\n", "from pandas.io.json import json_normalize\n", - "from glob import glob \n", + "from glob import glob\n", "from pathlib import Path" ] }, @@ -136,19 +138,19 @@ } ], "source": [ - "#testing the code with a single json file. \n", + "#testing the code with a single json file.\n", "\n", "file_test = open('/Users/bjaber/Projects/CuBIDS-use_cases/cubids/testdata/complete/sub-01/ses-phdiff/dwi/sub-01_ses-phdiff_acq-HASC55AP_dwi.json')\n", "sample_data = json.load(file_test)\n", "sample_data.keys()\n", "sample_data.get('SliceTiming')\n", - "SliceTime = sample_data.get('SliceTiming') #the way you can snatch things out of a dictionary \n", + "SliceTime = sample_data.get('SliceTiming') #the way you can snatch things out of a dictionary\n", "#if dict doesn't have the key it will return none vs. error\n", "\n", - "if SliceTime: \n", + "if SliceTime:\n", " sample_data.update({\"SliceTime%03d\"%SliceNum : time for SliceNum, time in enumerate(SliceTime)})\n", " del sample_data['SliceTiming']\n", - " \n", + "\n", "array_data = pd.DataFrame.from_dict(sample_data, orient='index')\n", "array_data" ] @@ -198,7 +200,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Here we change the value for AcquisionNumber from 1 to 2. 
\n", + "#Here we change the value for AcquisionNumber from 1 to 2.\n", "#json_data[\"AcquisitionNumber\"] = 2" ] }, @@ -248,7 +250,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Uncomment below to view the python object as a JSON string \n", + "#Uncomment below to view the python object as a JSON string\n", "#json_string" ] }, @@ -258,9 +260,9 @@ "metadata": {}, "outputs": [], "source": [ - "#notes from Matt \n", + "#notes from Matt\n", "\n", - "# have a function that does the reading and creates 1 row then you have to loop and the dataframe grows through concatanation \n", + "# have a function that does the reading and creates 1 row then you have to loop and the dataframe grows through concatanation\n", "# pandas.concat" ] }, @@ -337,7 +339,7 @@ } ], "source": [ - "for path in Path('/Users/bjaber/Projects/CuBIDS/cubids/testdata/complete').rglob('*.json'): \n", + "for path in Path('/Users/bjaber/Projects/CuBIDS/cubids/testdata/complete').rglob('*.json'):\n", " #print(path)\n", "\n", " counter=0\n", @@ -350,18 +352,18 @@ " file_tree = open(s_path)\n", " example_data = json.load(file_tree)\n", " SliceTime = example_data.get('SliceTiming') #the way you can snatch things out of a dictionary #if dict doesn't have the key it will return none vs. error\n", - " if SliceTime: \n", + " if SliceTime:\n", " example_data.update({\"SliceTime%03d\"%SliceNum : time for SliceNum, time in enumerate(SliceTime)})\n", " del example_data['SliceTiming']\n", " print(example_data)\n", - " #data = pd.DataFrame.from_dict(example_data, orient='index') \n", + " #data = pd.DataFrame.from_dict(example_data, orient='index')\n", " #data\n", " counter += 1\n", - " \n", "\n", - "#NOTE: error when trying to put the data into a pandas dataframe. \n", - "# print(example_data) was used to make sure that inputs that are an array such as in the field SliceTiming are being separated into indenpendent values of SliceTime00x that should feed into the dataframe. 
\n", - "# it is doing that across all json files that are being loaded from the directory " + "\n", + "#NOTE: error when trying to put the data into a pandas dataframe.\n", + "# print(example_data) was used to make sure that inputs that are an array such as in the field SliceTiming are being separated into indenpendent values of SliceTime00x that should feed into the dataframe.\n", + "# it is doing that across all json files that are being loaded from the directory" ] }, { diff --git a/notebooks/Key_and_Param_Groups.ipynb b/docs/notebooks/Key_and_Param_Groups.ipynb similarity index 95% rename from notebooks/Key_and_Param_Groups.ipynb rename to docs/notebooks/Key_and_Param_Groups.ipynb index 94c49359e..3970ce6df 100644 --- a/notebooks/Key_and_Param_Groups.ipynb +++ b/docs/notebooks/Key_and_Param_Groups.ipynb @@ -1,5 +1,12 @@ { "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Key and Param Groups" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -90,7 +97,7 @@ "outputs": [], "source": [ "\n", - "# UNDOING RENAMING OF FILES \n", + "# UNDOING RENAMING OF FILES\n", "\n", "files_and_dirs = Path(\"/Users/scovitz/CuBIDS/cubids/testdata/complete/\").rglob('*')\n", "for path in files_and_dirs:\n", @@ -169,7 +176,7 @@ " output = bod.get_file_params(key_group)\n", " print(len(output))\n", " #print(output)\n", - " \n", + "\n", "\n", " #print(output)\n", " #output2 = bod.get_param_groups(key_group)\n", @@ -197,16 +204,16 @@ "#print(output[1])\n", "#print(output[0])\n", "# filenames = list(output.keys())\n", - "# first_params = output[filenames[0]] \n", - "# #for path in filenames: \n", + "# first_params = output[filenames[0]]\n", + "# #for path in filenames:\n", "# #print(path + \"\\n\")\n", "# #print(first_params)\n", "\n", "\n", "\n", "# GET ALL FILENAMES ASSOCIATED WITH A KEY GROUP\n", - "# USE GLOB TO FIND ALL INSTANCES \n", - "# IF THEY MATCH AND DICTS MATCH, DO THE REPLACEMENT \n", + "# USE GLOB TO FIND ALL INSTANCES\n", + "# IF THEY MATCH AND DICTS MATCH, DO THE REPLACEMENT\n", "\n", "\n", "# for i in range(len(files)):\n", @@ -220,8 +227,7 @@ "# print(filenames[0])\n", "\n", "# param_groups = bod.get_param_groups(key_group)\n", - "# print(len(param_groups))\n", - " " + "# print(len(param_groups))\n" ] }, { @@ -258,10 +264,10 @@ "source": [ "# TESTING SOMOE SHIT WOOOOOOOOOOOOO\n", "\n", - "dict_0 = {'EchoTime': 0.03, 'TotalReadoutTime': 0.0362102, 'RepetitionTime': 2.5, 'DwellTime': 3.1e-06, 'PartialFourier': 1, 'FlipAngle': 80, 'EffectiveEchoSpacing': 0.000510002, 'PhaseEncodingDirection': 'j-', 'SliceTime000': 1.2, 'SliceTime001': 0, 'SliceTime002': 1.3, 'SliceTime003': 0.1, 'SliceTime004': 1.4, 'SliceTime005': 0.1, 'SliceTime006': 1.4, 'SliceTime007': 0.2, 'SliceTime008': 1.5, 'SliceTime009': 0.3, 'SliceTime010': 1.6, 'SliceTime011': 0.3, 'SliceTime012': 1.6, 'SliceTime013': 0.4, 'SliceTime014': 1.7, 'SliceTime015': 0.5, 'SliceTime016': 1.8, 'SliceTime017': 0.5, 'SliceTime018': 1.8, 'SliceTime019': 0.6, 'SliceTime020': 1.9, 'SliceTime021': 0.7, 'SliceTime022': 2.0, 'SliceTime023': 0.7, 'SliceTime024': 2.0, 'SliceTime025': 0.8, 'SliceTime026': 2.1, 'SliceTime027': 0.9, 'SliceTime028': 2.2, 'SliceTime029': 0.9, 'SliceTime030': 2.2, 'SliceTime031': 1.0, 'SliceTime032': 2.3, 'SliceTime033': 1.0, 'SliceTime034': 2.4, 'SliceTime035': 1.1, 'SliceTime036': 2.4, 'SliceTime037': 1.2} \n", + "dict_0 = {'EchoTime': 0.03, 'TotalReadoutTime': 0.0362102, 'RepetitionTime': 2.5, 'DwellTime': 3.1e-06, 'PartialFourier': 1, 'FlipAngle': 80, 'EffectiveEchoSpacing': 
0.000510002, 'PhaseEncodingDirection': 'j-', 'SliceTime000': 1.2, 'SliceTime001': 0, 'SliceTime002': 1.3, 'SliceTime003': 0.1, 'SliceTime004': 1.4, 'SliceTime005': 0.1, 'SliceTime006': 1.4, 'SliceTime007': 0.2, 'SliceTime008': 1.5, 'SliceTime009': 0.3, 'SliceTime010': 1.6, 'SliceTime011': 0.3, 'SliceTime012': 1.6, 'SliceTime013': 0.4, 'SliceTime014': 1.7, 'SliceTime015': 0.5, 'SliceTime016': 1.8, 'SliceTime017': 0.5, 'SliceTime018': 1.8, 'SliceTime019': 0.6, 'SliceTime020': 1.9, 'SliceTime021': 0.7, 'SliceTime022': 2.0, 'SliceTime023': 0.7, 'SliceTime024': 2.0, 'SliceTime025': 0.8, 'SliceTime026': 2.1, 'SliceTime027': 0.9, 'SliceTime028': 2.2, 'SliceTime029': 0.9, 'SliceTime030': 2.2, 'SliceTime031': 1.0, 'SliceTime032': 2.3, 'SliceTime033': 1.0, 'SliceTime034': 2.4, 'SliceTime035': 1.1, 'SliceTime036': 2.4, 'SliceTime037': 1.2}\n", "dict_1 = {'EchoTime': 0.03, 'TotalReadoutTime': 0.0362102, 'RepetitionTime': 2.5, 'DwellTime': 3.1e-06, 'PartialFourier': 1, 'FlipAngle': 80, 'EffectiveEchoSpacing': 0.000510002, 'PhaseEncodingDirection': 'j-', 'SliceTime000': 1.2, 'SliceTime001': 0, 'SliceTime002': 1.3, 'SliceTime003': 0.1, 'SliceTime004': 1.4, 'SliceTime005': 0.1, 'SliceTime006': 1.4, 'SliceTime007': 0.2, 'SliceTime008': 1.5, 'SliceTime009': 0.3, 'SliceTime010': 1.6, 'SliceTime011': 0.3, 'SliceTime012': 1.6, 'SliceTime013': 0.4, 'SliceTime014': 1.7, 'SliceTime015': 0.5, 'SliceTime016': 1.8, 'SliceTime017': 0.5, 'SliceTime018': 1.8, 'SliceTime019': 0.6, 'SliceTime020': 1.9, 'SliceTime021': 0.7, 'SliceTime022': 2.0, 'SliceTime023': 0.7, 'SliceTime024': 2.0, 'SliceTime025': 0.8, 'SliceTime026': 2.1, 'SliceTime027': 0.9, 'SliceTime028': 2.2, 'SliceTime029': 0.9, 'SliceTime030': 2.2, 'SliceTime031': 1.0, 'SliceTime032': 2.3, 'SliceTime033': 1.0, 'SliceTime034': 2.4, 'SliceTime035': 1.1, 'SliceTime036': 2.4, 'SliceTime037': 1.2}\n", "\n", - "if dict_0 == split_params: \n", + "if dict_0 == split_params:\n", " print(\"YAY\")\n", "else:\n", " print(\"STUPID\")" diff --git a/notebooks/PofC_Key_Values2.ipynb b/docs/notebooks/PofC_Key_Values2.ipynb similarity index 96% rename from notebooks/PofC_Key_Values2.ipynb rename to docs/notebooks/PofC_Key_Values2.ipynb index 0e17595f2..a647a2ef0 100644 --- a/notebooks/PofC_Key_Values2.ipynb +++ b/docs/notebooks/PofC_Key_Values2.ipynb @@ -1,5 +1,12 @@ { "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Key Values Proof of Concept" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -88,7 +95,7 @@ } ], "source": [ - "# use pybids to extract BIDS entities from single subject \n", + "# use pybids to extract BIDS entities from single subject\n", "\n", "path = all_files[0]\n", "dict1= parse_file_entities(path)\n", @@ -134,8 +141,8 @@ "\n", "for file in all_files:\n", "#for each file in the list, parse the information into a dictionary and add it to the list we just initialized\n", - " result = parse_file_entities(file) \n", - " \n", + " result = parse_file_entities(file)\n", + "\n", " entities.append(result)\n", " #entities.add(string_result)\n", "print(entities)" @@ -156,23 +163,23 @@ ], "source": [ "\n", - "# loop through files to create a bigger dictionary of discrete keys, adding each value to a list \n", + "# loop through files to create a bigger dictionary of discrete keys, adding each value to a list\n", "dictionary = {}\n", "# initialize a new dictionary\n", "for e in entities:\n", - "# for each dictionary in the list we created above \n", + "# for each dictionary in the list we created above\n", " for k,v in e.items():\n", - 
" #for each set of key-value pairs in each dictionary \n", + " #for each set of key-value pairs in each dictionary\n", " #print(k,v)\n", " if k not in dictionary.keys():\n", " #if the key is not in the larger dictionary keys, set the value as value, but in a list\n", " dictionary[k]=[v]\n", - " else: \n", - " #if the key is in the dictionary, add the new value to the existing value list \n", + " else:\n", + " #if the key is in the dictionary, add the new value to the existing value list\n", " dictionary[k].append(v)\n", - " \n", - " \n", - "print(dictionary) " + "\n", + "\n", + "print(dictionary)" ] }, { @@ -191,25 +198,25 @@ } ], "source": [ - "#create one dictionary value per key in original dictionary \n", - "# loop through dictionary values and create dictionaries for instances of each list \n", - "l_dicts = [] \n", + "#create one dictionary value per key in original dictionary\n", + "# loop through dictionary values and create dictionaries for instances of each list\n", + "l_dicts = []\n", "for key in dictionary.keys():\n", "# for each list that is the value of the big dictionary:\n", " #print (key)\n", - " counts = {} #initialize a new dictionary for # of instances \n", + " counts = {} #initialize a new dictionary for # of instances\n", " l_labels = dictionary[key]\n", " #print(l_labels)\n", " for item in l_labels:\n", - " #for each item in those lists \n", + " #for each item in those lists\n", " if item not in counts.keys():\n", " #if the item is not in the new dictionary, set it to 1\n", - " counts[item]= 1 \n", + " counts[item]= 1\n", " else:\n", " #if it already exists, add 1\n", " counts[item]+= 1\n", " l_dicts.append(counts)\n", - "#list of dictionaries where KEYS: BIDS entities values and VALUES: instances of that key \n", + "#list of dictionaries where KEYS: BIDS entities values and VALUES: instances of that key\n", "print(l_dicts)\n", "\n" ] @@ -231,14 +238,13 @@ "#make a new dictionary with KEYS: BIDS entities (ie: subject, session, etc) and VALUES: dictionaries of ID's and instances\n", "\n", "new_dictionary = {}\n", - "counter = 0 \n", + "counter = 0\n", "for key in dictionary.keys():\n", - " #assign values from l_dicts to each key \n", + " #assign values from l_dicts to each key\n", " new_dictionary[key] = l_dicts[counter]\n", " counter += 1\n", "\n", - "print(new_dictionary)\n", - " " + "print(new_dictionary)\n" ] }, { @@ -247,7 +253,7 @@ "metadata": {}, "outputs": [], "source": [ - "#initialize new list for tuples \n", + "#initialize new list for tuples\n", "l_tups= []\n", "for key in new_dictionary:\n", " #list out all keys\n", diff --git a/notebooks/Tests/datatype-anat_reconstruction-refaced_suffix-T1w.csv b/docs/notebooks/Tests/datatype-anat_reconstruction-refaced_suffix-T1w.csv similarity index 100% rename from notebooks/Tests/datatype-anat_reconstruction-refaced_suffix-T1w.csv rename to docs/notebooks/Tests/datatype-anat_reconstruction-refaced_suffix-T1w.csv diff --git a/notebooks/Tests/datatype-func_run-1_suffix-bold_task-rest.csv b/docs/notebooks/Tests/datatype-func_run-1_suffix-bold_task-rest.csv similarity index 100% rename from notebooks/Tests/datatype-func_run-1_suffix-bold_task-rest.csv rename to docs/notebooks/Tests/datatype-func_run-1_suffix-bold_task-rest.csv diff --git a/notebooks/Tests/datatype-func_run-2_suffix-bold_task-rest.csv b/docs/notebooks/Tests/datatype-func_run-2_suffix-bold_task-rest.csv similarity index 100% rename from notebooks/Tests/datatype-func_run-2_suffix-bold_task-rest.csv rename to 
docs/notebooks/Tests/datatype-func_run-2_suffix-bold_task-rest.csv diff --git a/notebooks/keyparamgrouptest.ipynb b/docs/notebooks/keyparamgrouptest.ipynb similarity index 99% rename from notebooks/keyparamgrouptest.ipynb rename to docs/notebooks/keyparamgrouptest.ipynb index a260cd146..150eb3df6 100644 --- a/notebooks/keyparamgrouptest.ipynb +++ b/docs/notebooks/keyparamgrouptest.ipynb @@ -1,11 +1,11 @@ { "cells": [ { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], - "source": [] + "source": [ + "# Key and Param Group Test" + ] }, { "cell_type": "code", @@ -1645,7 +1645,7 @@ "source": [ "from cubids.cubids import *\n", "files = [\n", - " '/Users/mcieslak/projects/test_bids_data/HBN/sub-NDARAT581NDH/ses-HBNsiteRU/dwi/sub-NDARAT581NDH_ses-HBNsiteRU_acq-64dir_dwi.nii.gz', \n", + " '/Users/mcieslak/projects/test_bids_data/HBN/sub-NDARAT581NDH/ses-HBNsiteRU/dwi/sub-NDARAT581NDH_ses-HBNsiteRU_acq-64dir_dwi.nii.gz',\n", " '/Users/mcieslak/projects/test_bids_data/HBN/sub-NDARRP384BVX/ses-HBNsiteRU/dwi/sub-NDARRP384BVX_ses-HBNsiteRU_acq-64dir_dwi.nii.gz']\n", "\n", "dfs = []\n", @@ -1662,7 +1662,7 @@ " print(fieldmap_lookup[path])\n", " fieldmap_types = sorted([fmap.entities['fmap'] for fmap in fieldmap_lookup[path]])\n", " for fmap_num, fmap_type in enumerate(fieldmap_types):\n", - " example_data['fieldmap_type%02d' % fmap_num] = fmap_type \n", + " example_data['fieldmap_type%02d' % fmap_num] = fmap_type\n", "\n", " # Expand slice timing to multiple columns\n", " SliceTime = example_data.get('SliceTiming')\n", diff --git a/notebooks/metadata_image_param.ipynb b/docs/notebooks/metadata_image_param.ipynb similarity index 96% rename from notebooks/metadata_image_param.ipynb rename to docs/notebooks/metadata_image_param.ipynb index b8732d970..f7fe247e1 100644 --- a/notebooks/metadata_image_param.ipynb +++ b/docs/notebooks/metadata_image_param.ipynb @@ -4,6 +4,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ + "# Metadata Image Parameter Proof of Concept\n", + "\n", "In this proof of concept we will read & write JSON files in Jupyter notebook. \n", "\n", "1. display the data in the sidecar \n", @@ -17,11 +19,11 @@ "metadata": {}, "outputs": [], "source": [ - "#import json module to be able to read & write json files \n", + "#import json module to be able to read & write json files\n", "import json\n", "import pandas as pd\n", "from pandas.io.json import json_normalize\n", - "from glob import glob \n", + "from glob import glob\n", "from pathlib import Path" ] }, @@ -136,19 +138,19 @@ } ], "source": [ - "#testing the code with a single json file. \n", + "#testing the code with a single json file.\n", "\n", "file_test = open('/Users/bjaber/Projects/CuBIDS-use_cases/cubids/testdata/complete/sub-01/ses-phdiff/dwi/sub-01_ses-phdiff_acq-HASC55AP_dwi.json')\n", "sample_data = json.load(file_test)\n", "sample_data.keys()\n", "sample_data.get('SliceTiming')\n", - "SliceTime = sample_data.get('SliceTiming') #the way you can snatch things out of a dictionary \n", + "SliceTime = sample_data.get('SliceTiming') #the way you can snatch things out of a dictionary\n", "#if dict doesn't have the key it will return none vs. 
error\n", "\n", - "if SliceTime: \n", + "if SliceTime:\n", " sample_data.update({\"SliceTime%03d\"%SliceNum : time for SliceNum, time in enumerate(SliceTime)})\n", " del sample_data['SliceTiming']\n", - " \n", + "\n", "array_data = pd.DataFrame.from_dict(sample_data, orient='index', columns = ['1'])\n", "array_data" ] @@ -198,7 +200,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Here we change the value for AcquisionNumber from 1 to 2. \n", + "#Here we change the value for AcquisionNumber from 1 to 2.\n", "#json_data[\"AcquisitionNumber\"] = 2" ] }, @@ -248,7 +250,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Uncomment below to view the python object as a JSON string \n", + "#Uncomment below to view the python object as a JSON string\n", "#json_string" ] }, @@ -258,9 +260,9 @@ "metadata": {}, "outputs": [], "source": [ - "#notes from Matt \n", + "#notes from Matt\n", "\n", - "# have a function that does the reading and creates 1 row then you have to loop and the dataframe grows through concatanation \n", + "# have a function that does the reading and creates 1 row then you have to loop and the dataframe grows through concatanation\n", "# pandas.concat" ] }, @@ -451,44 +453,44 @@ " file_tree = open(s_path)\n", " example_data = json.load(file_tree)\n", " wanted_keys = example_data.keys() & IMAGING_PARAMS\n", - " example_data = {key: example_data[key] for key in wanted_keys} \n", + " example_data = {key: example_data[key] for key in wanted_keys}\n", " SliceTime = example_data.get('SliceTiming') #the way you can snatch things out of a dictionary #if dict doesn't have the key it will return none vs. error\n", - " if SliceTime: \n", + " if SliceTime:\n", " example_data.update({\"SliceTime%03d\"%SliceNum : [time] for SliceNum, time in enumerate(SliceTime)})\n", " del example_data['SliceTiming']\n", " #if ShimSetting:\n", - " \n", + "\n", " dfs.append(example_data)\n", - " \n", + "\n", "df = pd.DataFrame(dfs)\n", "#df.drop_duplicates()\n", "df.head()\n", "\n", "\n", "\n", - "#create dataframe of unique rows \n", - "#bids entities filter in the cubids class to filter through the files \n", - "#loop over , get metadata, and put into the dataframe \n", + "#create dataframe of unique rows\n", + "#bids entities filter in the cubids class to filter through the files\n", + "#loop over , get metadata, and put into the dataframe\n", "\n", "\n", "\n", " #print(example_data)\n", "\n", "\n", - " \n", + "\n", "#for file in example_data:\n", " #data = pd.DataFrame.from_dict(example_data, orient='index') # read data frame from json file\n", " #dfs.append(data) # append the data frame to the list\n", " #temp = pd.concat(dfs, ignore_index=True) # concatenate all the data frames in the list.\n", "\n", - " #data = pd.DataFrame.from_dict(example_data, orient='index') \n", + " #data = pd.DataFrame.from_dict(example_data, orient='index')\n", " #data\n", " #counter += 1\n", - " \n", "\n", - "#NOTE: error when trying to put the data into a pandas dataframe. This error happens regardless of the way SliceTiming is setup. \n", - "# print(example_data) was used to make sure that inputs that are an array such as in the field SliceTiming are being separated into indenpendent values of SliceTime00x that should feed into the dataframe. \n", - "# it is doing that across all json files that are being loaded from the directory " + "\n", + "#NOTE: error when trying to put the data into a pandas dataframe. 
This error happens regardless of the way SliceTiming is setup.\n", + "# print(example_data) was used to make sure that inputs that are an array such as in the field SliceTiming are being separated into indenpendent values of SliceTime00x that should feed into the dataframe.\n", + "# it is doing that across all json files that are being loaded from the directory" ] }, { diff --git a/notebooks/rename_files_work.ipynb b/docs/notebooks/rename_files_work.ipynb similarity index 98% rename from notebooks/rename_files_work.ipynb rename to docs/notebooks/rename_files_work.ipynb index eaff64144..68b2df57b 100644 --- a/notebooks/rename_files_work.ipynb +++ b/docs/notebooks/rename_files_work.ipynb @@ -1,5 +1,12 @@ { "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Rename Files" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -14,7 +21,7 @@ } ], "source": [ - "# TEST BED \n", + "# TEST BED\n", "\n", "test = \"happy.py\"\n", "new = test.replace(\".py\", \".json\")\n", @@ -27,7 +34,7 @@ "metadata": {}, "outputs": [], "source": [ - "import sys \n", + "import sys\n", "sys.path.append(\"..\")" ] }, @@ -48,18 +55,18 @@ } ], "source": [ - "# USE THIS BEFORE TESTING! \n", + "# USE THIS BEFORE TESTING!\n", "\n", - "from pathlib import Path \n", + "from pathlib import Path\n", "import shutil\n", "import os\n", "\n", - "from pkg_resources import resource_filename as pkgrf \n", + "from pkg_resources import resource_filename as pkgrf\n", "\n", "# returns string path to testdata\n", "TEST_DATA = pkgrf(\"cubids\", \"testdata\")\n", "\n", - "# should give you the full path \n", + "# should give you the full path\n", "tmp_path = Path().resolve()\n", "#print(tmp_path)\n", "\n", @@ -237,7 +244,7 @@ "metadata": {}, "outputs": [], "source": [ - "import glob \n", + "import glob\n", "import os\n", "\n", "\n", @@ -246,13 +253,13 @@ "for path in Path(\"/Users/Covitz/CuBIDS/data/sub-1832999514/\").iterdir():\n", " if path.is_file():\n", " print(path.stem)\n", - " old_name = path.stem \n", + " old_name = path.stem\n", " old_ext = path.suffix\n", " directory = path.parent\n", " #print(type(directory))\n", " new_name = \"A_\" + old_name + old_ext\n", " path.rename(Path(directory, new_name))\n", - " \n" + "\n" ] }, { @@ -262,28 +269,28 @@ "outputs": [], "source": [ "# @Params\n", - "# - path: a string containing the path to the directory inside which we want to change files \n", + "# - path: a string containing the path to the directory inside which we want to change files\n", "# - pattern: the substring of the file we would like to replace\n", "# - replacement: the substring that will replace \"pattern\"\n", "# @Returns\n", - "# - None \n", + "# - None\n", "def rename_files_old(files, pattern, replacement):\n", " # what are \"pattern\" and \"replacement\"\n", - " # if you want to do a string replace, \n", - " # you need the sub string that needs to be added to the file \n", + " # if you want to do a string replace,\n", + " # you need the sub string that needs to be added to the file\n", " # and the portion you want cut\n", - " # but before you do the replace, shouldn't you run isValid() on the new filename? 
\n", + " # but before you do the replace, shouldn't you run isValid() on the new filename?\n", " new_files = []\n", " for file in files:\n", " new_filename = file.replace(pattern, replacement)\n", " os.rename(file, new_filename)\n", - " \n", + "\n", " #if isvalid(test_filename) == True:\n", " # new_file = test_filename\n", - " #else: \n", - " # exception will be raised inside the function isValid \n", + " #else:\n", + " # exception will be raised inside the function isValid\n", " # print(\"Invalid Filename\")\n", - " return new_files \n", + " return new_files\n", "\n" ] }, @@ -293,18 +300,18 @@ "metadata": {}, "outputs": [], "source": [ - "import pathlib \n", + "import pathlib\n", "\n", "# @Params\n", - "# - path: a string containing the path to the directory inside which we want to change files \n", + "# - path: a string containing the path to the directory inside which we want to change files\n", "# - pattern: the substring of the file we would like to replace\n", "# - replacement: the substring that will replace \"pattern\"\n", "# @Returns\n", - "# - None \n", + "# - None\n", "def rename_files_1(path_to_dir, pattern, replacement):\n", " for path in pathlib.Path(path_to_dir).iterdir():\n", " if path.is_file():\n", - " old_name = path.stem \n", + " old_name = path.stem\n", " old_ext = path.suffix\n", " directory = path.parent\n", " new_name = old_name.replace(pattern, replacement) + old_ext\n", @@ -317,14 +324,14 @@ "metadata": {}, "outputs": [], "source": [ - "import pathlib \n", + "import pathlib\n", "\n", "# @Params\n", - "# - path: a string containing the path to the bids directory inside which we want to change files \n", + "# - path: a string containing the path to the bids directory inside which we want to change files\n", "# - pattern: the substring of the file we would like to replace\n", "# - replacement: the substring that will replace \"pattern\"\n", "# @Returns\n", - "# - None \n", + "# - None\n", "def rename_files(bids_dir, pattern, replacement):\n", " files_and_dirs = Path(bids_dir).rglob('*')\n", " for path in files_and_dirs:\n", @@ -2283,11 +2290,11 @@ } ], "source": [ - "import glob \n", - "import pathlib \n", + "import glob\n", + "import pathlib\n", "\n", - "# testing out our function rename_files \n", - "# changes all filenames in all_files containing substrings \"PNC2\" to \"PNC20\" \n", + "# testing out our function rename_files\n", + "# changes all filenames in all_files containing substrings \"PNC2\" to \"PNC20\"\n", "\n", "#root_dir = \"/Users/Covitz/CuBIDS/data/sub-1832999514/ses-PNC2/func/\"\n", "\n", @@ -2316,21 +2323,21 @@ "metadata": {}, "outputs": [], "source": [ - "# PROCESS NOTES \n", + "# PROCESS NOTES\n", "\n", - "# in BIDS, want to replace everything up to the BIDS root \n", - "# don't want to replace all filenames up to the BIDS root \n", + "# in BIDS, want to replace everything up to the BIDS root\n", + "# don't want to replace all filenames up to the BIDS root\n", "\n", "# could have a rename subject function and a rename session function\n", - "# also have a rename files function \n", + "# also have a rename files function\n", "\n", - "# wants a single function that lets you replace any part of the string \n", + "# wants a single function that lets you replace any part of the string\n", "\n", "# pathlib.rglob - like \"find\" in the command line\n", "# bids_dir.rglob\n", - "# pybids.parsentities - if not valid BIDS, will get error from parse entities \n", + "# pybids.parsentities - if not valid BIDS, will get error from parse entities\n", "\n", - "# 
replace directory names and filenames " + "# replace directory names and filenames" ] }, { diff --git a/notebooks/workwithtestdata.ipynb b/docs/notebooks/workwithtestdata.ipynb similarity index 99% rename from notebooks/workwithtestdata.ipynb rename to docs/notebooks/workwithtestdata.ipynb index cbb6ce775..1270bcc97 100644 --- a/notebooks/workwithtestdata.ipynb +++ b/docs/notebooks/workwithtestdata.ipynb @@ -1,5 +1,12 @@ { "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Work with Test Data" + ] + }, { "cell_type": "code", "execution_count": 4, @@ -28,7 +35,7 @@ " data_dir = test_data(Path(newdir))\n", " return data_dir\n", "\n", - "# copy the data \n", + "# copy the data\n", "data_root = copy_testing_data(\"test1\")" ] }, @@ -566,7 +573,7 @@ "source": [ "from cubids.cubids import *\n", "files = [\n", - " '/Users/mcieslak/projects/test_bids_data/HBN/sub-NDARAT581NDH/ses-HBNsiteRU/dwi/sub-NDARAT581NDH_ses-HBNsiteRU_acq-64dir_dwi.nii.gz', \n", + " '/Users/mcieslak/projects/test_bids_data/HBN/sub-NDARAT581NDH/ses-HBNsiteRU/dwi/sub-NDARAT581NDH_ses-HBNsiteRU_acq-64dir_dwi.nii.gz',\n", " '/Users/mcieslak/projects/test_bids_data/HBN/sub-NDARRP384BVX/ses-HBNsiteRU/dwi/sub-NDARRP384BVX_ses-HBNsiteRU_acq-64dir_dwi.nii.gz']\n", "\n", "dfs = []\n", @@ -583,7 +590,7 @@ " print(fieldmap_lookup[path])\n", " fieldmap_types = sorted([fmap.entities['fmap'] for fmap in fieldmap_lookup[path]])\n", " for fmap_num, fmap_type in enumerate(fieldmap_types):\n", - " example_data['fieldmap_type%02d' % fmap_num] = fmap_type \n", + " example_data['fieldmap_type%02d' % fmap_num] = fmap_type\n", "\n", " # Expand slice timing to multiple columns\n", " SliceTime = example_data.get('SliceTiming')\n", diff --git a/docs/usage.rst b/docs/usage.rst index e9277781d..ff8fa367d 100644 --- a/docs/usage.rst +++ b/docs/usage.rst @@ -255,4 +255,4 @@ In the next section, we'll introduce ``DataLad`` and walk through a real example .. rubric:: Footnotes -.. [#f1] PNC: `The Philadelphia Developmental Cohort `_. \ No newline at end of file +.. [#f1] PNC: `The Philadelphia Developmental Cohort `_. diff --git a/pyproject.toml b/pyproject.toml index 0c25e9bf6..804b05f71 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,11 +44,12 @@ Paper = "https://doi.org/10.1016/j.neuroimage.2022.119609" doc = [ "nbsphinx", "packaging", + "recommonmark", "sphinx >= 2.2", "sphinx-argparse", - "sphinx_rtd_theme", + "sphinx_gallery", "sphinx_markdown_tables", - "recommonmark", + "sphinx_rtd_theme", ] tests = [ "codespell",