From 73429bc300d7211fac9b22a287eeaf9c661490f8 Mon Sep 17 00:00:00 2001 From: Lennart Walger <> Date: Wed, 13 Sep 2023 07:17:01 +0200 Subject: [PATCH 1/7] bids conform --- .dockerignore | 0 .gitmodules | 0 app/config/__init__.py | 0 app/config/experiment.py | 6 +- app/inference_bids.py | 280 ++++++++++++++++++ app/models/__init__.py | 0 app/models/model_builder.py | 0 app/preprocess_bids.py | 140 +++++++++ app/requirements.txt | 8 +- app/templates/subcortical_mask_v3.nii.gz | Bin app/utils/__init__.py | 0 app/utils/base.py | 197 +++++------- app/utils/bayes_uncertainty_utils.py | 0 app/utils/confidence.py | 0 app/utils/create_hdf5_patch_dataset.py | 0 app/utils/h5data.py | 0 app/utils/helpers.py | 0 app/utils/keras_bayes_utils.py | 0 app/utils/patch_dataloader.py | 0 app/utils/post_processor.py | 10 +- app/weights/noel_deepFCD_dropoutMC_model_1.h5 | Bin app/weights/noel_deepFCD_dropoutMC_model_2.h5 | Bin app/weights/vnet_masker_model_best.pth.tar | Bin environment.yml | 11 + 24 files changed, 522 insertions(+), 130 deletions(-) mode change 100644 => 100755 .dockerignore mode change 100644 => 100755 .gitmodules mode change 100644 => 100755 app/config/__init__.py mode change 100644 => 100755 app/config/experiment.py create mode 100755 app/inference_bids.py mode change 100644 => 100755 app/models/__init__.py mode change 100644 => 100755 app/models/model_builder.py create mode 100755 app/preprocess_bids.py mode change 100644 => 100755 app/requirements.txt mode change 100644 => 100755 app/templates/subcortical_mask_v3.nii.gz mode change 100644 => 100755 app/utils/__init__.py mode change 100644 => 100755 app/utils/base.py mode change 100644 => 100755 app/utils/bayes_uncertainty_utils.py mode change 100644 => 100755 app/utils/confidence.py mode change 100644 => 100755 app/utils/create_hdf5_patch_dataset.py mode change 100644 => 100755 app/utils/h5data.py mode change 100644 => 100755 app/utils/helpers.py mode change 100644 => 100755 app/utils/keras_bayes_utils.py mode change 100644 => 100755 app/utils/patch_dataloader.py mode change 100644 => 100755 app/utils/post_processor.py mode change 100644 => 100755 app/weights/noel_deepFCD_dropoutMC_model_1.h5 mode change 100644 => 100755 app/weights/noel_deepFCD_dropoutMC_model_2.h5 mode change 100644 => 100755 app/weights/vnet_masker_model_best.pth.tar create mode 100755 environment.yml diff --git a/.dockerignore b/.dockerignore old mode 100644 new mode 100755 diff --git a/.gitmodules b/.gitmodules old mode 100644 new mode 100755 diff --git a/app/config/__init__.py b/app/config/__init__.py old mode 100644 new mode 100755 diff --git a/app/config/experiment.py b/app/config/experiment.py old mode 100644 new mode 100755 index 8da0111..06b5073 --- a/app/config/experiment.py +++ b/app/config/experiment.py @@ -38,7 +38,7 @@ options['initial_epoch_2'] = 1 # cases to exclude -options['exclude'] = ['.DS_Store', '._.DS_Store', '078', '095'] +options['exclude'] = ['.DS_Store', '._.DS_Store'] # threshold to select voxels for training, discarding CSF and darker WM in FLAIR options['thr'] = 0.1 @@ -46,9 +46,9 @@ options['th_dnn_train_2'] = 0.1 # probabilistic # post-processing binary threshold. After segmentation, probabilistic masks are binarized using a defined threshold. 
-options['t_bin'] = 0.1 +options['t_bin'] = 0.7 # The resulting binary mask is filtered by removing lesion regions with lesion size before a defined value -options['l_min'] = 25 +options['l_min'] = 75 options['patch_size'] = (16,16,16) options['train_split'] = 0.25 diff --git a/app/inference_bids.py b/app/inference_bids.py new file mode 100755 index 0000000..877b60f --- /dev/null +++ b/app/inference_bids.py @@ -0,0 +1,280 @@ +#!/usr/bin/env python3 +#%% +import logging +import multiprocessing +import os +import subprocess +import sys +import warnings +import bids +from bids import BIDSLayout + +from tqdm.contrib.concurrent import process_map +from functools import partial +#%% +from config.experiment import options + +warnings.filterwarnings("ignore") +import time + +import numpy as np +import setproctitle as spt +from tqdm import tqdm + +from utils.helpers import * + +from preprocess_bids import preprocess_image + +import argparse + +logging.basicConfig( + level=logging.DEBUG, + style="{", + datefmt="%Y-%m-%d %H:%M:%S", + format="{asctime} {levelname} {filename}:{lineno}: {message}", +) +os.environ["KERAS_BACKEND"] = "theano" + +# configuration +parser = argparse.ArgumentParser( + prog='deepFCD', + description='deepFCD model', + epilog="I dare you to at the code!") +parser.add_argument('-bp','--bidspath') +parser.add_argument('-sp','--space') +# set to True or any non-zero value for brain extraction or skull-removal, False otherwise +parser.add_argument('-bm','--brainmask',action='store_true',default=False) +# co-register T1 and T2 images to MNI152 space and N3 correction before brain extraction (True/False) +parser.add_argument('-pp','--preprocess',action='store_true',default=False) +parser.add_argument('-o','--overwrite',action='store_true',default=False) +parser.add_argument('-dev','--device',default='cpu') +parser.add_argument('-s','--subject',default=None) + +args_ = parser.parse_args() + +if not os.path.isabs(args_.bidspath): + args_.bidspath = os.path.abspath(args_.bidspath) + +print(args_.bidspath) +orig_ds = BIDSLayout(args_.bidspath, validate=False) +print(orig_ds) + +if args_.subject is None: + subjects = orig_ds.get_subjects() +else: + subjects = [args_.subject.replace('sub-','')] + print(subjects) + +# GPU/CPU options +# cpu, cuda, cuda0, cuda1, or cudaX: flag using gpu 1 or 2 +if args_.device.startswith("cuda1"): + os.environ[ + "THEANO_FLAGS" + ] = "mode=FAST_RUN,device=cuda1,floatX=float32,dnn.enabled=False" +elif args_.device.startswith("cpu"): + cores = str(multiprocessing.cpu_count() // 2) + var = os.getenv("OMP_NUM_THREADS", cores) + try: + logging.info("# of threads initialized: {}".format(int(var))) + except ValueError: + raise TypeError( + "The environment variable OMP_NUM_THREADS" + " should be a number, got '%s'." 
% var + ) + # os.environ['openmp'] = 'True' + os.environ["THEANO_FLAGS"] = "mode=FAST_RUN,device=cpu,openmp=True,floatX=float32" +else: + os.environ["THEANO_FLAGS"] = "mode=FAST_RUN,device=cuda0,floatX=float32,dnn.enabled=False" +logging.info(os.environ["THEANO_FLAGS"]) + +from keras import backend as K +from keras.models import load_model + +from models.noel_models_keras import * +from utils.base import * +from utils.metrics import * + +outdir = os.path.join(os.path.dirname(args_.bidspath), "deepfcd") + +cwd = os.path.realpath(os.path.dirname(__file__)) +use_gpu = args_.device.startswith("cuda") +print(orig_ds) + +outdir = os.path.join(os.path.dirname(args_.bidspath),'deepfcd') +os.makedirs(outdir,exist_ok=True) +with open(os.path.join(outdir,'dataset_description.json'),'w') as f: + f.write('{"Name": "fov","BIDSVersion": "1.7.0","DatasetType": "derivative","PipelineDescription": {"Name": "antsRegistration"}}') + +if args_.brainmask: + #multiproc + t1w_paths = [] + flair_paths = [] + fullids=[] + for s in subjects: + t1w_paths.append(os.path.basename(orig_ds.get(subject=s,space=args_.space,suffix='T1w')[0].path)) + flair_paths.append(orig_ds.get(subject=s,space=args_.space,suffix='FLAIR')[0].path) + fullids.append(f"sub-{s}") + + process_map(partial(preprocess_image,indir_=args_.bidspath,outdir_=outdir,preprocess=args_.preprocess, use_gpu=use_gpu),fullids,t1w_paths,flair_paths) + + # for s in subjects: + # t1w_path = orig_ds.get(subject=s,space=args_.space,suffix='T1w')[0].path + # flair_path = orig_ds.get(subject=s,space=args_.space,suffix='FLAIR')[0].path + # preprocess_image(id_=f"sub-{s}", t1_fname=os.path.basename(t1w_path), t2_fname=os.path.basename(flair_path), indir_=args_.bidspath,outdir_=outdir,preprocess=args_.preprocess, use_gpu=use_gpu) + +else: + logging.info( + "Skipping image preprocessing and brain masking, presumably images are co-registered, bias-corrected, and skull-stripped" + ) + +proc_ds = BIDSLayout(outdir, validate=False) +if args_.subject is None: + subjects = proc_ds.get_subjects() +else: + subjects = [args_.subject.replace('sub-','')] + print(subjects) + +print(proc_ds) +#%% +# sys.exit(0) +#%% +# deepFCD configuration +K.set_image_dim_ordering("th") +K.set_image_data_format("channels_first") # TH dimension ordering in this code + +options["parallel_gpu"] = False +modalities = ["T1", "FLAIR"] +x_names = options["x_names"] + +# seed = options['seed'] +options["dropout_mc"] = False # TODO was True +options["batch_size"] = 350000 +options["mini_batch_size"] = 2048 +options["load_checkpoint_1"] = True +options["load_checkpoint_2"] = True + +# trained model weights based on 148 histologically-verified FCD subjects +options["test_folder"] = outdir +options["weight_paths"] = os.path.join(cwd, "weights") +options["experiment"] = "noel_deepFCD_dropoutMC" +logging.info("experiment: {}".format(options["experiment"])) +spt.setproctitle(options["experiment"]) + +#%% +# sys.exit(0) +#%% +# -------------------------------------------------- +# initialize the CNN +# -------------------------------------------------- +# initialize empty model +model = None +# initialize the CNN architecture +model = off_the_shelf_model(options) + +load_weights = os.path.join( + options["weight_paths"], "noel_deepFCD_dropoutMC_model_1.h5" +) +logging.info( + "loading DNN1, model[0]: {} exists".format(load_weights) +) if os.path.isfile(load_weights) else sys.exit( + "model[0]: {} doesn't exist".format(load_weights) +) +model[0] = load_model(load_weights) + +load_weights = os.path.join( + 
options["weight_paths"], "noel_deepFCD_dropoutMC_model_2.h5" +) +logging.info( + "loading DNN2, model[1]: {} exists".format(load_weights) +) if os.path.isfile(load_weights) else sys.exit( + "model[1]: {} doesn't exist".format(load_weights) +) +model[1] = load_model(load_weights) +logging.info(model[1].summary()) + +# -------------------------------------------------- +# test the cascaded model +# -------------------------------------------------- +# test_list = ['mcd_0468_1'] +# sys.exit(0) +for s in tqdm(subjects, desc="serving predictions using the trained model", colour="blue"): + fullid = f"sub-{s}" + options['fullid'] = fullid + # t1_file = ds.get(subject=s,space=args_.space,suffix='T1w')[0].path + 'label-brain_FLAIR.nii.gz' + 'label-brain_T1w.nii.gz' + + t1_file = proc_ds.get(subject=s, space='MNI152NLin2009aSym', label='brain', suffix='T1w')[0].path + t2_file = proc_ds.get(subject=s, space='MNI152NLin2009aSym', label='brain', suffix='FLAIR')[0].path + orig_bidsfiles = [ + orig_ds.get(subject=s,space=args_.space,suffix='T1w')[0], + orig_ds.get(subject=s,space=args_.space,suffix='FLAIR')[0] + ] + orig_files = [bf.path for bf in orig_bidsfiles] + + t1_transform = proc_ds.get(subject=s, extension='mat', suffix='T1w')[0].path + t2_transform = proc_ds.get(subject=s, extension='mat', suffix='FLAIR')[0].path + + files = [t1_file, t2_file] + + transform_files = [t1_transform, t2_transform] + + test_data = {} + test_data = {fullid: { + m: f for m, f in zip(modalities, files) # TOCHECK + } + } + test_transforms = {fullid: {m: n for m, n in zip(modalities, transform_files)}} + # test_data = {f: {m: os.path.join(options['test_folder'], f, n) for m, n in zip(modalities, files)} for f in test_list} + + + t_data = {} + t_data[fullid] = test_data[fullid] + transforms = {} + transforms[fullid] = test_transforms[fullid] + + options["pred_folder"] = os.path.join( + options["test_folder"], fullid, options["experiment"] + ) + os.makedirs(options["pred_folder"], exist_ok=True) + + pred_mean_fname = os.path.join(options["pred_folder"], f"{fullid}_space-MNI152NLin2009aSym_acq-{options['experiment']}Mean1_pred.nii.gz") + pred_var_fname = os.path.join(options["pred_folder"], f"{fullid}_space-MNI152NLin2009aSym_acq-{options['experiment']}Var1_pred.nii.gz") + + if np.logical_and(os.path.isfile(pred_mean_fname), os.path.isfile(pred_var_fname)): + logging.info("prediction for {} already exists".format(fullid)) + transform_img(pred_mean_fname,bids.layout.parse_file_entities(pred_mean_fname),orig_files[0],transform_files[0],targetspace=orig_bidsfiles[0].entities['space'],invert=True) + transform_img(pred_var_fname,bids.layout.parse_file_entities(pred_var_fname),orig_files[0],transform_files[0],targetspace=orig_bidsfiles[0].entities['space'],invert=True) + continue + + options["test_scan"] = fullid + + start = time.time() + logging.info("\n") + logging.info("-" * 70) + logging.info("testing the model for scan: {}".format(fullid)) + logging.info("-" * 70) + + # if transform(s) do not exist (i.e., no preprocessing done), then skip (see base.py#L412) + if not any([os.path.exists(transforms[fullid]["T1"]), os.path.exists(transforms[fullid]["FLAIR"])]): + transforms = None + + outputs = test_model( + model, + t_data, + options, + performance=True, + uncertainty=True, + transforms=transforms, + orig_files=orig_files, + invert_xfrm=True, + ) + #TODO loop over transforms, for now just use first + for k,v in outputs.items(): + 
transform_img(v,bids.layout.parse_file_entities(v),orig_files[0],transform_files[0],targetspace=orig_bidsfiles[0].entities['space'],invert=True) + + end = time.time() + diff = (end - start) // 60 + logging.info("-" * 70) + logging.info("time elapsed: ~ {} minutes".format(diff)) + logging.info("-" * 70) diff --git a/app/models/__init__.py b/app/models/__init__.py old mode 100644 new mode 100755 diff --git a/app/models/model_builder.py b/app/models/model_builder.py old mode 100644 new mode 100755 diff --git a/app/preprocess_bids.py b/app/preprocess_bids.py new file mode 100755 index 0000000..d15a66f --- /dev/null +++ b/app/preprocess_bids.py @@ -0,0 +1,140 @@ +import os +from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser + +import psutil +import torch +from mo_dots import to_data + +import deepMask.app.vnet as vnet +from deepMask.app.utils.data import * +from deepMask.app.utils.deepmask import * +from deepMask.app.utils.image_processing import noelImageProcessor + + +def preprocess_image(id_, t1_fname, t2_fname, indir_,outdir_, preprocess, use_gpu): + # set up parameters + outdir = os.path.join(outdir_, id_,'preproc') + os.makedirs(outdir, exist_ok=True) + + # tmpdir = os.path.join(outdir, id_, "tmp") + + # os.makedirs(tmpdir,exist_ok=True) + + t1 = os.path.join(indir_, id_, "anat", t1_fname) + t2 = os.path.join(indir_, id_, "anat", t2_fname) + args = to_data({}) # this is really dumb but the code needs it... + args.seed = 666 + + cwd = os.path.dirname(__file__) + + # trained weights based on manually corrected masks from + # 153 patients with cortical malformations + args.inference = os.path.join( + cwd, "deepMask/app/weights", "vnet_masker_model_best.pth.tar" + ) + # resize all input images to this resolution matching training data + args.resize = (160, 160, 160) + args.cuda = torch.cuda.is_available() and use_gpu + torch.manual_seed(args.seed) + args.device_ids = list(range(torch.cuda.device_count())) + # args.tmpdir = tmpdir + args.outdir = outdir + + mem_size = psutil.virtual_memory().available // ( + 1024 * 1024 * 1024 + ) # available RAM in GB + # mem_size = 32 + if mem_size < 64 and not use_gpu: + os.environ["BRAIN_MASKING"] = "cpu" + os.environ["CUDA_VISIBLE_DEVICES"] = "-1" + model = None + else: + if args.cuda: + torch.cuda.manual_seed(args.seed) + print("build vnet, using GPU") + else: + print("build vnet, using CPU") + model = vnet.build_model(args) + + template = os.path.join( + cwd, "deepMask/app/template", "mni_icbm152_t1_tal_nlin_sym_09a.nii.gz" + ) + + # MRI pre-processing configuration + output_suffix = "_brain_final.nii.gz" + + noelImageProcessor( + id=id_, + t1=t1, + t2=t2, + output_suffix=output_suffix, + output_dir=outdir, + template=template, + usen3=True, + args=args, + model=model, + preprocess=preprocess, + ).pipeline() + + +if __name__ == "__main__": + # configuration + # parse command line arguments + + parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) + parser.add_argument( + "-i", "--id", dest="id", default="FCD_123", help="Alphanumeric patient code" + ) + parser.add_argument( + "-t1", + "--t1_fname", + dest="t1_fname", + default="t1.nii.gz", + help="T1-weighted image", + ) + parser.add_argument( + "-t2", + "--t2_fname", + dest="t2_fname", + default="t2.nii.gz", + help="T2-weighted image", + ) + parser.add_argument( + "-i", + "--indir", + dest="indir", + default="data/", + help="Directory containing the input images", + ) + parser.add_argument( + "-o", + "--outdir", + dest="outdir", + default="data/", + help="Directory 
containing the input images", + ) + parser.add_argument( + "-p", + "--preprocess", + dest="preprocess", + action="store_true", + help="Co-register and perform non-uniformity correction of input images", + ) + parser.add_argument( + "-g", + "--use_gpu", + dest="use_gpu", + action="store_true", + help="Compute using GPU, defaults to using CPU", + ) + args = parser.parse_args() + + preprocess_image( + id_= args.id, + t1_fname_= args.t1_fname, + t2_fname_= args.t2_fname, + indir_= args.indir, + outdir_= args.outdir, + preprocess_= args.preprocess, + use_gpu_= args.use_gpu, + ) diff --git a/app/requirements.txt b/app/requirements.txt old mode 100644 new mode 100755 index b0241e5..b1c321a --- a/app/requirements.txt +++ b/app/requirements.txt @@ -1,4 +1,4 @@ -antspyx==0.3.5 --only-binary=antspyx +antspyx==0.3.8 --only-binary=antspyx git+https://github.com/ravnoor/atlasreader@master#egg=atlasreader Theano==1.0.4 keras==2.2.4 @@ -7,7 +7,7 @@ matplotlib==3.5.1 mo-dots==9.147.22086 nibabel==3.2.2 nilearn==0.9.1 -numpy==1.21.6 +numpy==1.18.5 pandas==1.3.5 psutil==5.9.2 scikit-image==0.19.2 @@ -16,4 +16,6 @@ scipy==1.7.3 setproctitle==1.2.3 tabulate==0.9.0 tqdm==4.64.0 -xlrd==2.0.1 \ No newline at end of file +xlrd==2.0.1 +tensorflow==1.15.5 +tensorflow_probability==0.8 diff --git a/app/templates/subcortical_mask_v3.nii.gz b/app/templates/subcortical_mask_v3.nii.gz old mode 100644 new mode 100755 diff --git a/app/utils/__init__.py b/app/utils/__init__.py old mode 100644 new mode 100755 diff --git a/app/utils/base.py b/app/utils/base.py old mode 100644 new mode 100755 index 762f760..66f5b31 --- a/app/utils/base.py +++ b/app/utils/base.py @@ -1,3 +1,4 @@ +#%% import json import os import time @@ -8,6 +9,10 @@ import nibabel as nib import numpy as np import pandas as pd + +from bids import BIDSLayout + +#%% from keras.callbacks import (CSVLogger, EarlyStopping, LambdaCallback, ModelCheckpoint) from keras.models import load_model @@ -18,8 +23,9 @@ from utils.patch_dataloader import * from utils.post_processor import * +import re - +#%% def print_data_shape(X): print("====> # 3D training patches:", X.shape[0], "\n") print("====> # patch size:", (X.shape[2], X.shape[3], X.shape[4]), "\n") @@ -377,14 +383,17 @@ def test_model( orig_files=None, invert_xfrm=True, ): + outputs = {} threshold = options["th_dnn_train_2"] scan = options["test_scan"] + "_" # organize experiments # first network - options["test_name"] = scan + options["experiment"] + "_prob_0.nii.gz" - options["test_mean_name"] = scan + options["experiment"] + "_prob_mean_0.nii.gz" - options["test_var_name"] = scan + options["experiment"] + "_prob_var_0.nii.gz" - + options["test_name"] = os.path.join(options["pred_folder"], f"{options['fullid']}_space-MNI152NLin2009aSym_acq-{options['experiment']}0_pred.nii.gz") + options["test_mean_name"] = os.path.join(options["pred_folder"], f"{options['fullid']}_space-MNI152NLin2009aSym_acq-{options['experiment']}Mean0_pred.nii.gz") + options["test_var_name"] = os.path.join(options["pred_folder"], f"{options['fullid']}_space-MNI152NLin2009aSym_acq-{options['experiment']}Var0_pred.nii.gz") + pred_var_0_img= None + pred_var_1_img=None + if uncertainty: pred_mean_0, pred_var_0, header = test_scan( model[0], @@ -404,26 +413,21 @@ def test_model( options, save_nifti=True, uncertainty=uncertainty, - T=20, + T=1, ) + + # pred_mean_0_img = nifti2ants(pred_mean_0, affine=None, header=header) + outputs['pred_mean_0_path'] = options["test_mean_name"] + + if pred_var_0_img is not None: + outputs['pred_var_0_path'] = 
options["test_var_name"] - pred_mean_0_img = nifti2ants(pred_mean_0, affine=header.get_qform(), header=header) - - if isinstance(transforms, dict): - apply_transforms( - pred_mean_0_img, - pred_var_0_img, - transforms, - orig_files, - invert_xfrm, - options, - uncertainty, - ) + # pred_mean_0_img = nifti2ants(pred_mean_0, affine=header.get_qform(), header=header) # second network - options["test_name"] = scan + options["experiment"] + "_prob_1.nii.gz" - options["test_mean_name"] = scan + options["experiment"] + "_prob_mean_1.nii.gz" - options["test_var_name"] = scan + options["experiment"] + "_prob_var_1.nii.gz" + options["test_name"] = os.path.join(options["pred_folder"], f"{options['fullid']}_space-MNI152NLin2009aSym_acq-{options['experiment']}1_pred.nii.gz") + options["test_mean_name"] = os.path.join(options["pred_folder"], f"{options['fullid']}_space-MNI152NLin2009aSym_acq-{options['experiment']}Mean1_pred.nii.gz") + options["test_var_name"] = os.path.join(options["pred_folder"], f"{options['fullid']}_space-MNI152NLin2009aSym_acq-{options['experiment']}Var1_pred.nii.gz") if uncertainty: pred_mean_1, pred_var_1, header = test_scan( @@ -435,8 +439,11 @@ def test_model( T=50, candidate_mask=pred_mean_0 > threshold, ) + # pred_var_1_img = nifti2ants( + # pred_var_1, affine=header.get_qform(), header=header + # ) pred_var_1_img = nifti2ants( - pred_var_1, affine=header.get_qform(), header=header + pred_var_1, affine=None, header=header ) else: pred_mean_1, header = test_scan( @@ -445,32 +452,34 @@ def test_model( options, save_nifti=True, uncertainty=uncertainty, - T=50, + T=1, candidate_mask=pred_mean_0 > threshold, ) - - pred_mean_1_img = nifti2ants(pred_mean_1, affine=header.get_qform(), header=header) - - if isinstance(transforms, dict): - apply_transforms( - pred_mean_1_img, - pred_var_1_img, - transforms, - orig_files, - invert_xfrm, - options, - uncertainty, - ) - + + + # pred_mean_1_img = nifti2ants(pred_mean_1, affine=header.get_qform(), header=header) + # pred_mean_1_img = nifti2ants(pred_mean_1, affine=None, header=header) + # outputs['pred_mean_1_img'] = pred_mean_1_img + outputs['pred_mean_1_path'] = options["test_mean_name"] + + if pred_var_1_img is not None: + outputs['pred_var_1_path'] = options["test_var_name"] + + + if performance: # postprocess the output segmentation - options["test_name"] = options["experiment"] + "_out_CNN.nii.gz" - out_segmentation, lpred, count = post_processing( + print('postprocessing') + # options["test_name"] = options["experiment"] + "_out_CNN.nii.gz" + maskpath, labelpath, _ = post_processing( pred_mean_1, options, header, save_nifti=True ) - outputs = [pred_mean_0, pred_mean_1, out_segmentation, lpred, count] - else: - outputs = [pred_mean_0, pred_mean_1] + + outputs['mask_path'] = maskpath + outputs['label_path'] = labelpath + + # outputs = [pred_mean_0, pred_var_0, pred_mean_1, pred_var_0 out_segmentation, lpred, count] + return outputs @@ -480,78 +489,30 @@ def nifti2ants(input_np, affine, header): return output_ants -def apply_transforms( - pred_mean_img, - pred_var_img, - transforms, - orig_files, - invert_xfrm, - options, - uncertainty, +def transform_img( + bidsfilepath, + bidsfileentities, + origfilepath, + transformpath, + targetspace, + invert=False, + interpolation="nearestneighbor", ): - print("writing data transformed to the appropriate sterotaxic space") - for m, t in transforms[options["test_scan"]].items(): - xfrm = ants.read_transform(t) - if invert_xfrm: - xfrm = xfrm.invert() - if uncertainty: - pred_var_xfmd = 
ants.apply_ants_transform_to_image( - transform=xfrm, - image=pred_var_img, - reference=ants.image_read(orig_files[m]), - interpolation="nearestneighbor", - ) - pred_var_xfmd.to_filename( - os.path.join( - options["pred_folder"], - options["test_var_name"].replace( - ".nii.gz", "_native-" + m + ".nii.gz" - ), - ) - ) - # pred_var_xfmd = ants.resample_image_to_target( - # image=pred_var_xfmd, - # target=ants.image_read(orig_files[m]), - # verbose=True, - # interp_type="nearestNeighbor", - # ) - # pred_var_xfmd.to_filename( - # os.path.join( - # options["pred_folder"], - # options["test_var_name"].replace( - # ".nii.gz", "_native_rsl-" + m + ".nii.gz" - # ), - # ) - # ) - pred_mean_xfmd = ants.apply_ants_transform_to_image( - transform=xfrm, - image=pred_mean_img, - reference=ants.image_read(orig_files[m]), - interpolation="nearestneighbor", - ) - pred_mean_xfmd.to_filename( - os.path.join( - options["pred_folder"], - options["test_mean_name"].replace( - ".nii.gz", "_native-" + m + ".nii.gz" - ), + print(f"writing data transformed to the {targetspace} space") + t = ants.read_transform(transformpath) + if invert: + t = t.invert() + + img = ants.image_read(bidsfilepath) + origimg = ants.image_read(origfilepath) + img_t = ants.apply_ants_transform_to_image( + transform=t, + image=img, + reference=origimg, + interpolation=interpolation, ) - ) - # pred_mean_xfmd = ants.resample_image_to_target( - # image=pred_mean_xfmd, - # target=ants.image_read(orig_files[m]), - # verbose=True, - # interp_type="nearestNeighbor", - # ) - # pred_mean_xfmd.to_filename( - # os.path.join( - # options["pred_folder"], - # options["test_mean_name"].replace( - # ".nii.gz", "_native_rsl-" + m + ".nii.gz" - # ), - # ) - # ) - + outname = bidsfilepath.replace(bidsfileentities['space'], targetspace) + img_t.to_filename(outname) def test_scan( model, @@ -559,7 +520,7 @@ def test_scan( options, transit=None, save_nifti=False, - uncertainty=False, + uncertainty=True, #TODO candidate_mask=None, T=20, ): @@ -592,6 +553,7 @@ def test_scan( os.mkdir(test_folder) # compute lesion segmentation in batches of size options['batch_size'] + for batch, centers in load_test_patches( test_x_data, options, @@ -615,15 +577,12 @@ def test_scan( if save_nifti: # out_scan = nib.Nifti1Image(seg_image, np.eye(4)) out_scan = nib.Nifti1Image(seg_image, affine=affine, header=header) - out_scan.to_filename( - os.path.join(options["pred_folder"], options["test_mean_name"]) - ) + out_scan.to_filename(options["test_mean_name"]) if uncertainty: out_scan = nib.Nifti1Image(var_image, affine=affine, header=header) - out_scan.to_filename( - os.path.join(options["pred_folder"], options["test_var_name"]) - ) + out_scan.to_filename(options["test_var_name"]) + if transit is not None: if not os.path.exists(test_folder): diff --git a/app/utils/bayes_uncertainty_utils.py b/app/utils/bayes_uncertainty_utils.py old mode 100644 new mode 100755 diff --git a/app/utils/confidence.py b/app/utils/confidence.py old mode 100644 new mode 100755 diff --git a/app/utils/create_hdf5_patch_dataset.py b/app/utils/create_hdf5_patch_dataset.py old mode 100644 new mode 100755 diff --git a/app/utils/h5data.py b/app/utils/h5data.py old mode 100644 new mode 100755 diff --git a/app/utils/helpers.py b/app/utils/helpers.py old mode 100644 new mode 100755 diff --git a/app/utils/keras_bayes_utils.py b/app/utils/keras_bayes_utils.py old mode 100644 new mode 100755 diff --git a/app/utils/patch_dataloader.py b/app/utils/patch_dataloader.py old mode 100644 new mode 100755 diff --git 
a/app/utils/post_processor.py b/app/utils/post_processor.py old mode 100644 new mode 100755 index e4df621..3c2de5a --- a/app/utils/post_processor.py +++ b/app/utils/post_processor.py @@ -67,14 +67,14 @@ def post_processing(input_scan, options, header, save_nifti=True): # save the output segmentation as nifti if save_nifti: nii_out = nib.Nifti1Image(output_scan, affine=header.get_qform(), header=header) - nii_out.to_filename(os.path.join(options["pred_folder"], options["test_name"])) + maskpath = os.path.join(options["pred_folder"], f"{options['fullid']}_space-MNI152NLin2009aSym_acq-{options['experiment']}Postproc_mask.nii.gz") + nii_out.to_filename(maskpath) labels_out = nib.Nifti1Image( labels_scan, affine=header.get_qform(), header=header ) - labels_out.to_filename( - os.path.join(options["pred_folder"], options["test_morph_name"]) - ) - return output_scan, pred_labels, count + labelpath = os.path.join(options["pred_folder"], f"{options['fullid']}_space-MNI152NLin2009aSym_acq-{options['experiment']}Postproc_label.nii.gz") + labels_out.to_filename(labelpath) + return maskpath, labelpath, count def extract_lesional_clus(label, input_scan, scan, options): diff --git a/app/weights/noel_deepFCD_dropoutMC_model_1.h5 b/app/weights/noel_deepFCD_dropoutMC_model_1.h5 old mode 100644 new mode 100755 diff --git a/app/weights/noel_deepFCD_dropoutMC_model_2.h5 b/app/weights/noel_deepFCD_dropoutMC_model_2.h5 old mode 100644 new mode 100755 diff --git a/app/weights/vnet_masker_model_best.pth.tar b/app/weights/vnet_masker_model_best.pth.tar old mode 100644 new mode 100755 diff --git a/environment.yml b/environment.yml new file mode 100755 index 0000000..437ee12 --- /dev/null +++ b/environment.yml @@ -0,0 +1,11 @@ +name: deepfcd +channels: + - conda-forge + - defaults +dependencies: + - python=3.7 + - keras=2.2.4 + - theano=1.0.4 + - pytorch=1.11.0 + - h5py=2.10.0 + - pip \ No newline at end of file From 310644f6562105484976b64f09b308c9ccb73e2e Mon Sep 17 00:00:00 2001 From: Lennart Walger <> Date: Wed, 13 Sep 2023 09:23:27 +0200 Subject: [PATCH 2/7] update readme --- README.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) mode change 100644 => 100755 README.md diff --git a/README.md b/README.md old mode 100644 new mode 100755 index 0280293..8d17d8d --- a/README.md +++ b/README.md @@ -1,3 +1,17 @@ +cut the crap, this code "works" with bids data and outputs bidsdata (somewhat) + +***run*** + +python app/inference_bids.py -bp -sp -dev cuda -pp -bm + +***install*** + +conda env create -f environment.yml +conda activate deepfcd +pip install -r app/deepMask/app/requirements.txt +pip install -r app/requirements.txt + +

Code repository for:
Multicenter Validated Detection of Focal Cortical Dysplasia using Deep Learning
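For orientation, the run command added to the README above maps -bp to a BIDS root and -sp to a space entity, which inference_bids.py then resolves to per-subject T1w and FLAIR files through pybids. The following is a minimal sketch of that lookup pattern, not code from this patch series; the dataset path /data/bids and the MNI152NLin2009aSym space value are assumptions for illustration only.

from bids import BIDSLayout

# Index a (hypothetical) BIDS root without validation, as inference_bids.py does
layout = BIDSLayout("/data/bids", validate=False)

for sub in layout.get_subjects():
    # Query one T1w and one FLAIR per subject, filtered by the requested space entity
    t1w = layout.get(subject=sub, space="MNI152NLin2009aSym", suffix="T1w")
    flair = layout.get(subject=sub, space="MNI152NLin2009aSym", suffix="FLAIR")
    if t1w and flair:
        print(sub, t1w[0].path, flair[0].path)

If either modality is missing for a subject, the query returns an empty list, which is why the sketch guards the print with a truth check.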
From bce71344a86f13ae3c4577259a0eec20a2f9f4bd Mon Sep 17 00:00:00 2001 From: Lennart Walger <17471740+1-w@users.noreply.github.com> Date: Wed, 13 Sep 2023 09:24:07 +0200 Subject: [PATCH 3/7] Update README.md --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 8d17d8d..8e6f846 100755 --- a/README.md +++ b/README.md @@ -7,8 +7,11 @@ python app/inference_bids.py -bp -sp -dev cuda -pp -bm ***install*** conda env create -f environment.yml + conda activate deepfcd + pip install -r app/deepMask/app/requirements.txt + pip install -r app/requirements.txt From b1947db782fce8598609a57a54a432cf5bcd1056 Mon Sep 17 00:00:00 2001 From: Lennart Walger <> Date: Thu, 14 Sep 2023 10:04:01 +0200 Subject: [PATCH 4/7] updated env file to get rid of pip install requirements --- environment.yml | 37 ++++++++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) diff --git a/environment.yml b/environment.yml index 437ee12..5cafe74 100755 --- a/environment.yml +++ b/environment.yml @@ -4,8 +4,35 @@ channels: - defaults dependencies: - python=3.7 - - keras=2.2.4 - - theano=1.0.4 - - pytorch=1.11.0 - - h5py=2.10.0 - - pip \ No newline at end of file + - pip + - pip: + - antspyx==0.3.8 --only-binary=antspyx + - antspynet==0.2.0 + - git+https://github.com/ravnoor/atlasreader@master#egg=atlasreader + - Theano==1.0.4 + - keras==2.2.4 + - h5py==2.10.0 + - matplotlib==3.5.1 + - mo-dots==9.147.22086 + - nibabel==3.2.2 + - nilearn==0.9.1 + - numpy==1.18.5 + - pandas==1.3.5 + - psutil==5.9.2 + - scikit-image==0.19.2 + - scikit-learn==1.0.2 + - scipy==1.7.3 + - setproctitle==1.2.3 + - tabulate==0.9.0 + - tqdm==4.64.0 + - xlrd==2.0.1 + - tensorflow==1.15.5 + - tensorflow_probability==0.8 + - --extra-index-url https://download.pytorch.org/whl/lts/1.8/cpu + - gast<=0.4.0,>=0.2.1 + - torchsummary==1.5.1 + - torch==1.8.2 + - torchvision==0.9.2 + - pybids + - protobuf==3.20.3 + - pygpu==0.7.6 From 0f84e2399824d720b66862b01b0c561904234131 Mon Sep 17 00:00:00 2001 From: Lennart Walger <> Date: Thu, 14 Sep 2023 11:28:16 +0200 Subject: [PATCH 5/7] added overwrite option --- app/inference_bids.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/app/inference_bids.py b/app/inference_bids.py index 877b60f..b7c3a30 100755 --- a/app/inference_bids.py +++ b/app/inference_bids.py @@ -147,7 +147,7 @@ x_names = options["x_names"] # seed = options['seed'] -options["dropout_mc"] = False # TODO was True +options["dropout_mc"] = True # TODO was True options["batch_size"] = 350000 options["mini_batch_size"] = 2048 options["load_checkpoint_1"] = True @@ -243,9 +243,12 @@ if np.logical_and(os.path.isfile(pred_mean_fname), os.path.isfile(pred_var_fname)): logging.info("prediction for {} already exists".format(fullid)) - transform_img(pred_mean_fname,bids.layout.parse_file_entities(pred_mean_fname),orig_files[0],transform_files[0],targetspace=orig_bidsfiles[0].entities['space'],invert=True) - transform_img(pred_var_fname,bids.layout.parse_file_entities(pred_var_fname),orig_files[0],transform_files[0],targetspace=orig_bidsfiles[0].entities['space'],invert=True) - continue + if not args_.overwrite: + transform_img(pred_mean_fname,bids.layout.parse_file_entities(pred_mean_fname),orig_files[0],transform_files[0],targetspace=orig_bidsfiles[0].entities['space'],invert=True) + transform_img(pred_var_fname,bids.layout.parse_file_entities(pred_var_fname),orig_files[0],transform_files[0],targetspace=orig_bidsfiles[0].entities['space'],invert=True) + continue + else: + 
logging.info("overwriting...") options["test_scan"] = fullid From 1780537f59c9a6fb83d0d6e95c701251639c0e54 Mon Sep 17 00:00:00 2001 From: Lennart Walger <> Date: Thu, 14 Sep 2023 11:30:15 +0200 Subject: [PATCH 6/7] added multiple subject ids input --- app/inference_bids.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/app/inference_bids.py b/app/inference_bids.py index b7c3a30..77a0295 100755 --- a/app/inference_bids.py +++ b/app/inference_bids.py @@ -48,7 +48,7 @@ parser.add_argument('-pp','--preprocess',action='store_true',default=False) parser.add_argument('-o','--overwrite',action='store_true',default=False) parser.add_argument('-dev','--device',default='cpu') -parser.add_argument('-s','--subject',default=None) +parser.add_argument('-s','--subjects', nargs='+', default=None) args_ = parser.parse_args() @@ -62,7 +62,7 @@ if args_.subject is None: subjects = orig_ds.get_subjects() else: - subjects = [args_.subject.replace('sub-','')] + subjects = [s.replace('sub-','') for s in args_.subject] print(subjects) # GPU/CPU options @@ -131,7 +131,7 @@ if args_.subject is None: subjects = proc_ds.get_subjects() else: - subjects = [args_.subject.replace('sub-','')] + subjects = [s.replace('sub-','') for s in args_.subject] print(subjects) print(proc_ds) From 9a2a91c3ff97e577377a3facda14bebb836cd4c7 Mon Sep 17 00:00:00 2001 From: Lennart Walger <> Date: Thu, 14 Sep 2023 11:33:11 +0200 Subject: [PATCH 7/7] bug fix --- app/inference_bids.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/app/inference_bids.py b/app/inference_bids.py index 77a0295..0436ba3 100755 --- a/app/inference_bids.py +++ b/app/inference_bids.py @@ -59,10 +59,10 @@ orig_ds = BIDSLayout(args_.bidspath, validate=False) print(orig_ds) -if args_.subject is None: +if args_.subjects is None: subjects = orig_ds.get_subjects() else: - subjects = [s.replace('sub-','') for s in args_.subject] + subjects = [s.replace('sub-','') for s in args_.subjects] print(subjects) # GPU/CPU options @@ -128,10 +128,10 @@ ) proc_ds = BIDSLayout(outdir, validate=False) -if args_.subject is None: +if args_.subjects is None: subjects = proc_ds.get_subjects() else: - subjects = [s.replace('sub-','') for s in args_.subject] + subjects = [s.replace('sub-','') for s in args_.subjects] print(subjects) print(proc_ds)