From 945c0569041c27a6323693db8a8672f8ea2d7b3a Mon Sep 17 00:00:00 2001
From: frheault
Date: Tue, 12 Jan 2021 14:39:15 -0600
Subject: [PATCH 01/15] First iteration, working tractometry script

---
 scilpy/utils/metrics_tools.py        |  59 ++++++++++++-
 scripts/scil_analyse_lesions_load.py | 122 +++++++++++++++++++++++++++
 2 files changed, 178 insertions(+), 3 deletions(-)
 create mode 100644 scripts/scil_analyse_lesions_load.py

diff --git a/scilpy/utils/metrics_tools.py b/scilpy/utils/metrics_tools.py
index 0f9dcf875..972465a4e 100644
--- a/scilpy/utils/metrics_tools.py
+++ b/scilpy/utils/metrics_tools.py
@@ -6,11 +6,64 @@
 import matplotlib
 import matplotlib.pyplot as plt
 import numpy as np
+import scipy.ndimage as ndi

 from scilpy.tractanalysis.streamlines_metrics import compute_tract_counts_map
 from scilpy.utils.filenames import split_name_with_nii


+def compute_lesions_stats(map_data, lesions_data, single_label=True,
+                          voxel_sizes=[1.0, 1.0, 1.0], min_lesions_vol=7):
+    """
+    Returns information related to lesions inside of a binary mask or voxel
+    labels map (bundle, for tractometry).
+
+    Parameters
+    ------------
+    map_data : np.ndarray (3)
+        Either a binary mask (uint8) or a voxel labels map (int16).
+    lesions_data : np.ndarray (3)
+        Binary mask of lesions. Should be uint8.
+    voxel_sizes : np.ndarray (3)
+        If not specified, returns voxel count (instead of volume)
+    single_label : boolean
+        If true, does not add an extra layer for number of labels.
+    Returns
+    ---------
+    lesions_load_dict : dict
+        For each label, volume and lesions count
+    lesions_atlas : np.ndarray (3)
+        Labelled lesions atlas
+    """
+    voxel_vol = np.prod(voxel_sizes)
+    lesions_atlas, _ = ndi.label(lesions_data)
+    lesions_load_dict = {}
+    for label in np.unique(map_data)[1:]:
+        section_dict = {}
+        tmp_mask = np.zeros(map_data.shape, dtype=np.int16)
+        tmp_mask[map_data == label] = 1
+        tmp_mask *= lesions_atlas
+
+        lesions_vols = []
+        for lesion in np.unique(tmp_mask)[1:]:
+            curr_vol = np.count_nonzero(tmp_mask[tmp_mask == lesion]) \
+                * voxel_vol
+            if curr_vol >= min_lesions_vol:
+                lesions_vols.append(curr_vol)
+
+        section_dict['total_volume'] = round(np.count_nonzero(tmp_mask), 3)
+        section_dict['avg_volume'] = round(np.average(lesions_vols), 3)
+        section_dict['std_volume'] = round(np.std(lesions_vols), 3)
+        section_dict['lesions_count'] = len(lesions_vols)
+
+        if single_label:
+            lesions_load_dict = section_dict
+        else:
+            lesions_load_dict[str(label).zfill(3)] = section_dict
+
+    return lesions_load_dict, tmp_mask
+
+
 def get_bundle_metrics_profiles(sft, metrics_files):
     """
     Returns the profile of each metric along each streamline from a sft.
@@ -40,11 +93,11 @@ def _get_profile_one_streamline(streamline, metrics_files):
         z_ind = np.floor(streamline[:, 2]).astype(np.int)

         return list(map(lambda metric_file: metric_file[x_ind, y_ind, z_ind],
-                    metrics_files))
+                        metrics_files))

     # We preload the data to avoid loading it for each streamline
     metrics_data = list(map(lambda metric_file: metric_file.get_fdata(dtype=np.float64),
-                    metrics_files))
+                            metrics_files))

     # The root list has S elements, where S == the number of streamlines.
     # Each element from S is a sublist with N elements, where N is the number
     # of metrics encountered along the current streamline.
     metrics_per_strl =\
         list(map(lambda strl: _get_profile_one_streamline(strl, metrics_data),
-                 streamlines))
+                 streamlines))

     converted = []
     # Here, the zip gives us a list of N tuples, so one tuple for each metric.
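For reference, a minimal sketch of how the helper introduced above is meant to be called at this point in the series. The file names are hypothetical, and it assumes nibabel is available and that the lesions mask and bundle mask are co-registered:

    import nibabel as nib
    import numpy as np

    from scilpy.utils.metrics_tools import compute_lesions_stats

    # Binary lesions mask and binary bundle mask, same grid and affine.
    lesions_img = nib.load('lesions_mask.nii.gz')
    bundle_img = nib.load('bundle_mask.nii.gz')
    lesions_data = lesions_img.get_fdata().astype(np.uint8)
    map_data = bundle_img.get_fdata().astype(np.uint8)

    # voxel_sizes converts voxel counts to mm3; min_lesions_vol filters out
    # connected components smaller than 7 mm3.
    lesions_load_dict, lesions_atlas = compute_lesions_stats(
        map_data, lesions_data, single_label=True,
        voxel_sizes=lesions_img.header.get_zooms()[0:3],
        min_lesions_vol=7)
    print(lesions_load_dict)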
diff --git a/scripts/scil_analyse_lesions_load.py b/scripts/scil_analyse_lesions_load.py
new file mode 100644
index 000000000..01074c49d
--- /dev/null
+++ b/scripts/scil_analyse_lesions_load.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+This script will output information about lesions load of a bundle.
+Either using streamlines, a binary map or a bundle voxel labels map.
+"""
+
+import argparse
+import json
+
+import nibabel as nib
+import numpy as np
+from scilpy.io.image import get_data_as_mask, get_data_as_label
+from scilpy.io.streamlines import load_tractogram_with_reference
+from scilpy.io.utils import (add_overwrite_arg,
+                             assert_inputs_exist,
+                             add_json_args,
+                             assert_outputs_exist,
+                             add_reference_arg)
+from scilpy.segment.streamlines import filter_grid_roi
+from scilpy.tractanalysis.streamlines_metrics import compute_tract_counts_map
+from scilpy.utils.metrics_tools import compute_lesions_stats
+
+
+def _build_arg_parser():
+    p = argparse.ArgumentParser(description=__doc__,
+                                formatter_class=argparse.RawTextHelpFormatter)
+
+    p.add_argument('in_lesions',
+                   help='Binary mask of the lesions (.nii.gz).')
+    p.add_argument('out_json',
+                   help='Output dictionary of lesions information (.json)')
+    p1 = p.add_mutually_exclusive_group()
+    p1.add_argument('--bundle',
+                    help='Path of the bundle file (.trk).')
+    p1.add_argument('--bundle_mask',
+                    help='Path of the bundle binary mask.')
+    p1.add_argument('--bundle_labels_map',
+                    help='Path of the bundle labels_map.')
+
+    p.add_argument('--min_lesions_vol', type=float, default=7,
+                   help='Minimum lesions volume in mm3 [%(default)s].')
+    p.add_argument('--out_atlas',
+                   help='Save the lesions as an atlas.')
+    p.add_argument('--out_lesions_stats',
+                   help='Save the lesions-wise streamlines count (.json).')
+
+    add_json_args(p)
+    add_overwrite_arg(p)
+    add_reference_arg(p)
+
+    return p
+
+
+def main():
+    parser = _build_arg_parser()
+    args = parser.parse_args()
+
+    if (not args.bundle) and (not args.bundle_mask) \
+            and (not args.bundle_labels_map):
+        parser.error('One of the options --bundle, --bundle_mask or '
+                     '--bundle_labels_map must be used.')
+
+    assert_inputs_exist(parser, [args.in_lesions],
+                        optional=[args.bundle, args.bundle_mask,
+                                  args.bundle_labels_map])
+    assert_outputs_exist(parser, args, args.out_json, args.out_atlas)
+
+    lesions_img = nib.load(args.in_lesions)
+    lesions_data = get_data_as_mask(lesions_img)
+
+    if args.bundle:
+        sft = load_tractogram_with_reference(parser, args, args.bundle)
+        sft.to_vox()
+        sft.to_corner()
+        streamlines = sft.get_streamlines_copy()
+        map_data = compute_tract_counts_map(streamlines,
+                                            lesions_data.shape)
+        map_data[map_data > 0] = 1
+    elif args.bundle_mask:
+        map_img = nib.load(args.bundle_mask)
+        map_data = get_data_as_mask(map_img)
+    else:
+        map_img = nib.load(args.bundle_labels_map)
+        map_data = get_data_as_label(map_img)
+
+    is_single_label = args.bundle_labels_map is None
+    voxel_sizes = lesions_img.header.get_zooms()[0:3]
+
+    lesions_load_dict, lesions_atlas = compute_lesions_stats(
+        map_data, lesions_data, single_label=is_single_label,
+        voxel_sizes=voxel_sizes, min_lesions_vol=args.min_lesions_vol)
+
+    with open(args.out_json, 'w') as outfile:
+        json.dump(lesions_load_dict, outfile,
+                  sort_keys=args.sort_keys, indent=args.indent)
+
+    if args.out_atlas:
+        nib.save(nib.Nifti1Image(lesions_atlas, lesions_img.affine),
+                 args.out_atlas)
+
+    lesions_dict = {}
+    if args.out_lesions_stats:
+        for lesion in np.unique(lesions_atlas)[1:]:
+            curr_vol = np.count_nonzero(lesions_atlas[lesions_atlas ==
lesion]) \
+                * np.prod(voxel_sizes)
+            if curr_vol >= args.min_lesions_vol:
+                if args.bundle:
+                    tmp = np.zeros(lesions_atlas.shape)
+                    tmp[lesions_atlas == lesion] = 1
+                    new_sft, _ = filter_grid_roi(sft, tmp, 'any', False)
+
+                lesions_dict[str(lesion).zfill(3)] = {
+                    'volume': curr_vol, 'streamlines_count': len(new_sft)}
+
+    with open(args.out_lesions_stats, 'w') as outfile:
+        json.dump(lesions_dict, outfile,
+                  sort_keys=args.sort_keys, indent=args.indent)
+
+
+if __name__ == "__main__":
+    main()

From 6436b249c98d93179ded5f754a0d4323bad4a4e5 Mon Sep 17 00:00:00 2001
From: frheault
Date: Tue, 12 Jan 2021 18:20:12 -0600
Subject: [PATCH 02/15] Added to connectivity

---
 scilpy/io/utils.py                   |   2 +-
 scilpy/utils/metrics_tools.py        |  55 ++++++++-----
 scripts/scil_analyse_lesions_load.py |  13 +++-
 scripts/scil_compute_connectivity.py | 112 ++++++++++++++++++++++-----
 4 files changed, 136 insertions(+), 46 deletions(-)

diff --git a/scilpy/io/utils.py b/scilpy/io/utils.py
index 7b7ac4b6d..04f270bff 100644
--- a/scilpy/io/utils.py
+++ b/scilpy/io/utils.py
@@ -310,7 +310,7 @@ def check(path):
         if check_dir_exists:
             path_dir = os.path.dirname(path)
             if path_dir and not os.path.isdir(path_dir):
-                parser.error('Directory {} \n for a given output file '
+                parser.error('Directory {}/ \n for a given output file '
                              'does not exist.'.format(path_dir))

diff --git a/scilpy/utils/metrics_tools.py b/scilpy/utils/metrics_tools.py
index 972465a4e..a397b6c2c 100644
--- a/scilpy/utils/metrics_tools.py
+++ b/scilpy/utils/metrics_tools.py
@@ -6,14 +6,14 @@
 import matplotlib
 import matplotlib.pyplot as plt
 import numpy as np
-import scipy.ndimage as ndi

 from scilpy.tractanalysis.streamlines_metrics import compute_tract_counts_map
 from scilpy.utils.filenames import split_name_with_nii


-def compute_lesions_stats(map_data, lesions_data, single_label=True,
-                          voxel_sizes=[1.0, 1.0, 1.0], min_lesions_vol=7):
+def compute_lesions_stats(map_data, lesions_atlas, single_label=True,
+                          voxel_sizes=[1.0, 1.0, 1.0], min_lesions_vol=7,
+                          computed_lesions_labels=None):
     """
     Returns information related to lesions inside of a binary mask or voxel
     labels map (bundle, for tractometry).
@@ -22,46 +22,59 @@ def compute_lesions_stats(map_data, lesions_data, single_label=True,
     ------------
     map_data : np.ndarray (3)
         Either a binary mask (uint8) or a voxel labels map (int16).
-    lesions_data : np.ndarray (3)
-        Binary mask of lesions. Should be uint8.
-    voxel_sizes : np.ndarray (3)
-        If not specified, returns voxel count (instead of volume)
+    lesions_atlas : np.ndarray (3)
+        Labelled atlas of lesions. Should be int16.
     single_label : boolean
         If true, does not add an extra layer for number of labels.
+    voxel_sizes : np.ndarray (3)
+        If not specified, returns voxel count (instead of volume)
+    min_lesions_vol : float
+        Minimum lesions volume in mm3 (default: 7, cross-shape).
+    computed_lesions_labels : np.ndarray (N)
+        For connectivity analysis, when the unique lesions labels are known,
+        providing a pre-computed list of labels saves computation.
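The saving the new computed_lesions_labels argument is after is that np.unique() over the full labelled atlas is run once and reused for every node, instead of once per connection. A rough sketch of the intended call pattern, using synthetic stand-in data (all names here are illustrative):

    import numpy as np
    import scipy.ndimage as ndi

    from scilpy.utils.metrics_tools import compute_lesions_stats

    # Synthetic stand-ins: one binary lesions mask, two 'connection' masks.
    rng = np.random.RandomState(0)
    lesions_data = (rng.rand(20, 20, 20) > 0.95).astype(np.uint8)
    node_masks = [(rng.rand(20, 20, 20) > 0.5).astype(np.uint8)
                  for _ in range(2)]

    # Label the lesions mask once, list its unique labels once...
    lesions_atlas, _ = ndi.label(lesions_data)
    all_labels = np.unique(lesions_atlas)[1:]

    # ...then reuse that list for every connection instead of recomputing
    # np.unique() on each masked copy of the atlas.
    for node_mask in node_masks:
        stats = compute_lesions_stats(node_mask, lesions_atlas,
                                      single_label=True,
                                      computed_lesions_labels=all_labels)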
Returns --------- lesions_load_dict : dict For each label, volume and lesions count - lesions_atlas : np.ndarray (3) - Labelled lesions atlas """ voxel_vol = np.prod(voxel_sizes) - lesions_atlas, _ = ndi.label(lesions_data) lesions_load_dict = {} - for label in np.unique(map_data)[1:]: + + if single_label: + labels_list = [1] + else: + labels_list = np.unique(map_data)[1:] + + for label in labels_list: section_dict = {} - tmp_mask = np.zeros(map_data.shape, dtype=np.int16) - tmp_mask[map_data == label] = 1 - tmp_mask *= lesions_atlas + if not single_label: + tmp_mask = np.zeros(map_data.shape, dtype=np.int16) + tmp_mask[map_data == label] = 1 + tmp_mask *= lesions_atlas + else: + tmp_mask = lesions_atlas * map_data lesions_vols = [] - for lesion in np.unique(tmp_mask)[1:]: + if computed_lesions_labels is None: + computed_lesions_labels = np.unique(tmp_mask)[1:] + for lesion in computed_lesions_labels: curr_vol = np.count_nonzero(tmp_mask[tmp_mask == lesion]) \ * voxel_vol if curr_vol >= min_lesions_vol: lesions_vols.append(curr_vol) - - section_dict['total_volume'] = round(np.count_nonzero(tmp_mask), 3) - section_dict['avg_volume'] = round(np.average(lesions_vols), 3) - section_dict['std_volume'] = round(np.std(lesions_vols), 3) - section_dict['lesions_count'] = len(lesions_vols) + if lesions_vols: + section_dict['total_volume'] = round(np.count_nonzero(tmp_mask), 3) + section_dict['avg_volume'] = round(np.average(lesions_vols), 3) + section_dict['std_volume'] = round(np.std(lesions_vols), 3) + section_dict['lesions_count'] = len(lesions_vols) if single_label: lesions_load_dict = section_dict else: lesions_load_dict[str(label).zfill(3)] = section_dict - return lesions_load_dict, tmp_mask + return lesions_load_dict def get_bundle_metrics_profiles(sft, metrics_files): diff --git a/scripts/scil_analyse_lesions_load.py b/scripts/scil_analyse_lesions_load.py index 01074c49d..b49e500a8 100644 --- a/scripts/scil_analyse_lesions_load.py +++ b/scripts/scil_analyse_lesions_load.py @@ -11,6 +11,8 @@ import nibabel as nib import numpy as np +import scipy.ndimage as ndi + from scilpy.io.image import get_data_as_mask, get_data_as_label from scilpy.io.streamlines import load_tractogram_with_reference from scilpy.io.utils import (add_overwrite_arg, @@ -86,9 +88,10 @@ def main(): is_single_label = args.bundle_labels_map is None voxel_sizes = lesions_img.header.get_zooms()[0:3] + lesions_atlas, _ = ndi.label(lesions_data) - lesions_load_dict, lesions_atlas = compute_lesions_stats( - map_data, lesions_data, single_label=is_single_label, + lesions_load_dict = compute_lesions_stats( + map_data, lesions_atlas, single_label=is_single_label, voxel_sizes=voxel_sizes, min_lesions_vol=args.min_lesions_vol) with open(args.out_json, 'w') as outfile: @@ -96,6 +99,7 @@ def main(): sort_keys=args.sort_keys, indent=args.indent) if args.out_atlas: + lesions_atlas *= map_data.astype(np.bool) nib.save(nib.Nifti1Image(lesions_atlas, lesions_img.affine), args.out_atlas) @@ -105,13 +109,14 @@ def main(): curr_vol = np.count_nonzero(lesions_atlas[lesions_atlas == lesion]) \ * np.prod(voxel_sizes) if curr_vol >= args.min_lesions_vol: + key = str(lesion).zfill(3) + lesions_dict[key] = {'volume': curr_vol} if args.bundle: tmp = np.zeros(lesions_atlas.shape) tmp[lesions_atlas == lesion] = 1 new_sft, _ = filter_grid_roi(sft, tmp, 'any', False) - lesions_dict[str(lesion).zfill(3)] = { - 'volume': curr_vol, 'streamlines_count': len(new_sft)} + lesions_dict[key]['streamlines_count'] = len(new_sft) with open(args.out_lesions_stats, 'w') 
as outfile:
         json.dump(lesions_dict, outfile,

diff --git a/scripts/scil_compute_connectivity.py b/scripts/scil_compute_connectivity.py
index 3464f1311..e9dc3a291 100755
--- a/scripts/scil_compute_connectivity.py
+++ b/scripts/scil_compute_connectivity.py
@@ -13,8 +13,9 @@
 This script only generates matrices in the form of array, does not visualize
 or reorder the labels (node).
-The parameter --similarity expects a folder with density maps (LABEL1_LABEL2.nii.gz)
-following the same naming convention as the input directory.
+The parameter --similarity expects a folder with density maps
+(LABEL1_LABEL2.nii.gz) following the same naming convention as the input
+directory.
 The bundles should be averaged versions in the same space.
 This will compute the weighted-dice between each node and their homologous
 average version.
@@ -27,6 +28,13 @@
 pre-computed maps (LABEL1_LABEL2.nii.gz) following the same naming convention
 as the input directory. Each will generate a matrix. The average non-zero
 value in the map will be reported in the matrices nodes.
+
+The parameter --lesions_load will compute 3 lesions-related matrices:
+lesions_count.npy, lesions_vol.npy, lesions_sc.npy and put them inside of a
+specified folder. They represent the number of lesions, the total volume of
+lesions and the total number of streamlines going through the lesions for each
+connection. Each connection can be seen as a 'bundle' and then something
+similar to scil_analyse_lesions_load.py is run for each 'bundle'.
 """

 import argparse
@@ -39,11 +47,13 @@
 import coloredlogs
 from dipy.io.utils import is_header_compatible, get_reference_info
 from dipy.tracking.streamlinespeed import length
+from dipy.tracking.vox2track import _streamlines_in_mask
 import h5py
 import nibabel as nib
 import numpy as np
+import scipy.ndimage as ndi

-from scilpy.io.image import get_data_as_label
+from scilpy.io.image import get_data_as_label, get_data_as_mask
 from scilpy.io.streamlines import reconstruct_streamlines_from_hdf5
 from scilpy.io.utils import (add_overwrite_arg, add_processes_arg,
                              add_verbose_arg,
@@ -51,6 +61,7 @@
                              validate_nbr_processes)
 from scilpy.tractanalysis.reproducibility_measures import compute_bundle_adjacency_voxel
 from scilpy.tractanalysis.streamlines_metrics import compute_tract_counts_map
+from scilpy.utils.metrics_tools import compute_lesions_stats


 def load_node_nifti(directory, in_label, out_label, ref_img):
@@ -132,6 +143,7 @@ def _processing_wrapper(args):
         measures_to_compute.remove('similarity')

     for measure in measures_to_compute:
+        # Maps
         if isinstance(measure, str) and os.path.isdir(measure):
             map_dirname = measure
             map_data = load_node_nifti(map_dirname,
@@ -139,23 +151,59 @@ def _processing_wrapper(args):
                                        labels_img)
             measures_to_return[map_dirname] = np.average(
                 map_data[map_data > 0])
-        elif isinstance(measure, tuple) and os.path.isfile(measure[0]):
-            metric_filename = measure[0]
-            metric_img = measure[1]
-            if not is_header_compatible(metric_img, labels_img):
-                logging.error('{} do not have a compatible header'.format(
-                    metric_filename))
-                raise IOError
-
-            metric_data = metric_img.get_fdata(dtype=np.float64)
-            if weighted:
-                density = density / np.max(density)
-                voxels_value = metric_data * density
-                voxels_value = voxels_value[voxels_value > 0]
+        elif isinstance(measure, tuple):
+            if not isinstance(measure[0], tuple) \
+                    and os.path.isfile(measure[0]):
+                metric_filename = measure[0]
+                metric_img = measure[1]
+                if not is_header_compatible(metric_img, labels_img):
+                    logging.error('{} does not have a compatible
header'.format(
+                        metric_filename))
+                    raise IOError
+
+                metric_data = metric_img.get_fdata(dtype=np.float64)
+                if weighted:
+                    density = density / np.max(density)
+                    voxels_value = metric_data * density
+                    voxels_value = voxels_value[voxels_value > 0]
+                else:
+                    voxels_value = metric_data[density > 0]
+
+                measures_to_return[metric_filename] = np.average(voxels_value)
+            # Lesions
             else:
-                voxels_value = metric_data[density > 0]
-
-            measures_to_return[metric_filename] = np.average(voxels_value)
+                lesions_filename = measure[0][0]
+                computed_lesions_labels = measure[0][1]
+                lesions_img = measure[1]
+                if not is_header_compatible(lesions_img, labels_img):
+                    logging.error('{} does not have a compatible header'.format(
+                        lesions_filename))
+                    raise IOError
+
+                voxel_sizes = lesions_img.header.get_zooms()[0:3]
+                lesions_img.set_filename('tmp.nii.gz')
+                lesions_atlas = get_data_as_label(lesions_img)
+                tmp_dict = compute_lesions_stats(
+                    density.astype(np.bool), lesions_atlas,
+                    voxel_sizes=voxel_sizes, single_label=True,
+                    computed_lesions_labels=computed_lesions_labels)
+                tmp_ind = _streamlines_in_mask(list(streamlines),
+                                               lesions_atlas.astype(np.uint8),
+                                               np.eye(3), [0, 0, 0])
+                streamlines_count = len(
+                    np.where(tmp_ind == [0, 1][True])[0].tolist())
+
+                if tmp_dict:
+                    measures_to_return[lesions_filename+'vol'] = \
+                        tmp_dict['total_volume']
+                    measures_to_return[lesions_filename+'count'] = \
+                        tmp_dict['lesions_count']
+                    measures_to_return[lesions_filename+'sc'] = \
+                        streamlines_count
+                else:
+                    measures_to_return[lesions_filename+'vol'] = 0
+                    measures_to_return[lesions_filename+'count'] = 0
+                    measures_to_return[lesions_filename+'sc'] = 0

     if include_dps:
         for dps_key in hdf5_file[key].keys():
@@ -197,6 +245,9 @@ def _build_arg_parser():
                    metavar=('IN_FILE', 'OUT_FILE'),
                    help='Input (.nii.gz)
and output file (.npy) for a metric ' 'weighted matrix.') + p.add_argument('--lesions_load', nargs=2, metavar=('IN_FILE', 'OUT_DIR'), + help='Input binary mask (.nii.gz) and output directory ' + 'for all lesions related matrices.') p.add_argument('--density_weighting', action="store_true", help='Use density-weighting for the metric weighted matrix.') @@ -264,6 +315,25 @@ def main(): dict_metrics_out_name[in_name] = out_name measures_output_filename.append(out_name) + dict_lesions_out_name = {} + if args.lesions_load is not None: + in_name = args.lesions_load[0] + lesions_img = nib.load(in_name) + lesions_data = get_data_as_mask(lesions_img) + lesions_atlas, _ = ndi.label(lesions_data) + measures_to_compute.append(((in_name, np.unique(lesions_atlas)[1:]), + nib.Nifti1Image(lesions_atlas, + lesions_img.affine))) + + out_name_1 = os.path.join(args.lesions_load[1], 'lesions_vol.npy') + out_name_2 = os.path.join(args.lesions_load[1], 'lesions_count.npy') + out_name_3 = os.path.join(args.lesions_load[1], 'lesions_sc.npy') + + dict_lesions_out_name[in_name+'vol'] = out_name_1 + dict_lesions_out_name[in_name+'count'] = out_name_2 + dict_lesions_out_name[in_name+'sc'] = out_name_3 + measures_output_filename.extend([out_name_1, out_name_2, out_name_3]) + assert_outputs_exist(parser, args, measures_output_filename) if not measures_to_compute: parser.error('No connectivity measures were selected, nothing ' @@ -334,6 +404,7 @@ def main(): # Filling out all the matrices (symmetric) in the order of labels_list nbr_of_measures = len(list(measures_dict.values())[0]) matrix = np.zeros((len(labels_list), len(labels_list), nbr_of_measures)) + for in_label, out_label in measures_dict: curr_node_dict = measures_dict[(in_label, out_label)] measures_ordering = list(curr_node_dict.keys()) @@ -341,7 +412,6 @@ def main(): for i, measure in enumerate(curr_node_dict): in_pos = labels_list.index(in_label) out_pos = labels_list.index(out_label) - matrix[in_pos, out_pos, i] = curr_node_dict[measure] matrix[out_pos, in_pos, i] = curr_node_dict[measure] @@ -359,6 +429,8 @@ def main(): matrix_basename = dict_metrics_out_name[measure] elif measure in dict_maps_out_name: matrix_basename = dict_maps_out_name[measure] + elif measure in dict_lesions_out_name: + matrix_basename = dict_lesions_out_name[measure] else: matrix_basename = measure From 89bd1f10f073016754cb9d99dc0d0e1dd285f97f Mon Sep 17 00:00:00 2001 From: frheault Date: Wed, 13 Jan 2021 14:15:46 -0600 Subject: [PATCH 03/15] Split all json using out_dir --- scilpy/utils/metrics_tools.py | 23 +++++--- scripts/scil_analyse_lesions_load.py | 82 ++++++++++++++++++++++------ scripts/scil_compute_connectivity.py | 2 +- 3 files changed, 83 insertions(+), 24 deletions(-) diff --git a/scilpy/utils/metrics_tools.py b/scilpy/utils/metrics_tools.py index a397b6c2c..e3564aed0 100644 --- a/scilpy/utils/metrics_tools.py +++ b/scilpy/utils/metrics_tools.py @@ -13,7 +13,7 @@ def compute_lesions_stats(map_data, lesions_atlas, single_label=True, voxel_sizes=[1.0, 1.0, 1.0], min_lesions_vol=7, - computed_lesions_labels=None): + precomputed_lesions_labels=None): """ Returns information related to lesions inside of a binary mask or voxel labels map (bundle, for tractometry). @@ -30,7 +30,7 @@ def compute_lesions_stats(map_data, lesions_atlas, single_label=True, If not specified, returns voxel count (instead of volume) min_lesions_vol : float Minimum lesions volume in mm3 (default: 7, cross-shape). 
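On the "default: 7, cross-shape" wording above: at 1 mm isotropic resolution, 7 mm3 presumably corresponds to the smallest 6-connected blob that scipy.ndimage.label() keeps as one component — a centre voxel plus its six face neighbours, i.e. a 3D cross. A quick sanity check of that reading:

    import numpy as np
    import scipy.ndimage as ndi

    # A 3D cross: centre voxel plus its 6 face neighbours.
    cross = np.zeros((3, 3, 3), dtype=np.uint8)
    cross[1, 1, 1] = 1
    cross[0, 1, 1] = cross[2, 1, 1] = 1
    cross[1, 0, 1] = cross[1, 2, 1] = 1
    cross[1, 1, 0] = cross[1, 1, 2] = 1

    voxel_vol = np.prod([1.0, 1.0, 1.0])        # mm3 per voxel at 1 mm iso
    print(np.count_nonzero(cross) * voxel_vol)  # 7.0, the default threshold
    print(ndi.label(cross)[1])                  # one connected component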
-    computed_lesions_labels : np.ndarray (N)
+    precomputed_lesions_labels : np.ndarray (N)
         For connectivity analysis, when the unique lesions labels are known,
         providing a pre-computed list of labels saves computation.
     Returns
@@ -56,18 +56,26 @@ def compute_lesions_stats(map_data, lesions_atlas, single_label=True,
             tmp_mask = lesions_atlas * map_data

         lesions_vols = []
-        if computed_lesions_labels is None:
+        if precomputed_lesions_labels is None:
             computed_lesions_labels = np.unique(tmp_mask)[1:]
+        else:
+            computed_lesions_labels = precomputed_lesions_labels
+
         for lesion in computed_lesions_labels:
             curr_vol = np.count_nonzero(tmp_mask[tmp_mask == lesion]) \
                 * voxel_vol
             if curr_vol >= min_lesions_vol:
                 lesions_vols.append(curr_vol)
         if lesions_vols:
-            section_dict['total_volume'] = round(np.count_nonzero(tmp_mask), 3)
+            section_dict['total_volume'] = round(np.sum(curr_vol), 3)
             section_dict['avg_volume'] = round(np.average(lesions_vols), 3)
             section_dict['std_volume'] = round(np.std(lesions_vols), 3)
             section_dict['lesions_count'] = len(lesions_vols)
+        else:
+            section_dict['total_volume'] = 0
+            section_dict['avg_volume'] = 0
+            section_dict['std_volume'] = 0
+            section_dict['lesions_count'] = 0

         if single_label:
             lesions_load_dict = section_dict
@@ -193,7 +201,8 @@ def get_bundle_metrics_mean_std(streamlines, metrics_files,

 def get_bundle_metrics_mean_std_per_point(streamlines, bundle_name,
                                           distances_to_centroid_streamline,
-                                          metrics, labels, density_weighting=False,
+                                          metrics, labels,
+                                          density_weighting=False,
                                           distance_weighting=False):
     """
     Compute the mean and std PER POiNT of the bundle for every given metric.
@@ -225,8 +234,8 @@
     unique_labels = np.unique(labels)
     num_digits_labels = len(str(np.max(unique_labels)))
     if density_weighting:
-        track_count = compute_tract_counts_map(streamlines,
-                                               metrics[0].shape).astype(np.float64)
+        track_count = compute_tract_counts_map(
+            streamlines, metrics[0].shape).astype(np.float64)
     else:
         track_count = np.ones(metrics[0].shape)

diff --git a/scripts/scil_analyse_lesions_load.py b/scripts/scil_analyse_lesions_load.py
index b49e500a8..0e4958c58 100644
--- a/scripts/scil_analyse_lesions_load.py
+++ b/scripts/scil_analyse_lesions_load.py
@@ -8,20 +8,23 @@

 import argparse
 import json
+import os

 import nibabel as nib
 import numpy as np
 import scipy.ndimage as ndi
-
 from scilpy.io.image import get_data_as_mask, get_data_as_label
 from scilpy.io.streamlines import load_tractogram_with_reference
 from scilpy.io.utils import (add_overwrite_arg,
                              assert_inputs_exist,
                              add_json_args,
-                             assert_outputs_exist,
+                             assert_output_dirs_exist_and_empty,
                              add_reference_arg)
 from scilpy.segment.streamlines import filter_grid_roi
 from scilpy.tractanalysis.streamlines_metrics import compute_tract_counts_map
+from scilpy.utils.filenames import split_name_with_nii
 from scilpy.utils.metrics_tools import compute_lesions_stats


 def _build_arg_parser():
@@ -31,8 +34,8 @@ def _build_arg_parser():
     p.add_argument('in_lesions',
                    help='Binary mask of the lesions (.nii.gz).')
-    p.add_argument('out_json',
-                   help='Output dictionary of lesions information (.json)')
+    p.add_argument('out_dir',
+                   help='Output directory for lesions information')
     p1 = p.add_mutually_exclusive_group()
     p1.add_argument('--bundle',
                     help='Path of the bundle file (.trk).')
@@ -45,8 +48,8 @@ def _build_arg_parser():
                    help='Minimum lesions volume in mm3 [%(default)s].')
     p.add_argument('--out_atlas',
                    help='Save the lesions as an atlas.')
-    p.add_argument('--out_lesions_stats',
-                   help='Save the
lesions-wise streamlines count (.json).') + p.add_argument('--out_lesions_stats', action='store_true', + help='Save the lesions-wise & streamlines count.') add_json_args(p) add_overwrite_arg(p) @@ -55,6 +58,13 @@ def _build_arg_parser(): return p +def _save_json_wrapper(args, out_name, out_dict): + filename = os.path.join(args.out_dir, out_name) + with open(filename, 'w') as outfile: + json.dump(out_dict, outfile, + sort_keys=args.sort_keys, indent=args.indent) + + def main(): parser = _build_arg_parser() args = parser.parse_args() @@ -66,12 +76,13 @@ def main(): assert_inputs_exist(parser, [args.in_lesions], optional=[args.bundle, args.bundle_mask, args.bundle_labels_map]) - assert_outputs_exist(parser, args, args.out_json, args.out_atlas) + assert_output_dirs_exist_and_empty(parser, args, args.out_dir) lesions_img = nib.load(args.in_lesions) lesions_data = get_data_as_mask(lesions_img) if args.bundle: + bundle_name, _ = split_name_with_nii(os.path.basename(args.bundle)) sft = load_tractogram_with_reference(parser, args, args.bundle) sft.to_vox() sft.to_corner() @@ -80,9 +91,13 @@ def main(): lesions_data.shape) map_data[map_data > 0] = 1 elif args.bundle_mask: + bundle_name, _ = split_name_with_nii( + os.path.basename(args.bundle_mask)) map_img = nib.load(args.bundle_mask) map_data = get_data_as_mask(map_img) else: + bundle_name, _ = split_name_with_nii(os.path.basename( + args.bundle_labels_map)) map_img = nib.load(args.bundle_labels_map) map_data = get_data_as_label(map_img) @@ -94,17 +109,13 @@ def main(): map_data, lesions_atlas, single_label=is_single_label, voxel_sizes=voxel_sizes, min_lesions_vol=args.min_lesions_vol) - with open(args.out_json, 'w') as outfile: - json.dump(lesions_load_dict, outfile, - sort_keys=args.sort_keys, indent=args.indent) - if args.out_atlas: lesions_atlas *= map_data.astype(np.bool) nib.save(nib.Nifti1Image(lesions_atlas, lesions_img.affine), args.out_atlas) - lesions_dict = {} if args.out_lesions_stats: + lesions_dict = {} for lesion in np.unique(lesions_atlas)[1:]: curr_vol = np.count_nonzero(lesions_atlas[lesions_atlas == lesion]) \ * np.prod(voxel_sizes) @@ -115,12 +126,51 @@ def main(): tmp = np.zeros(lesions_atlas.shape) tmp[lesions_atlas == lesion] = 1 new_sft, _ = filter_grid_roi(sft, tmp, 'any', False) + lesions_dict[key]['strs_count'] = len(new_sft) + + if is_single_label: + total_vol_dict = {} + avg_volume_dict = {} + std_volume_dict = {} + lesions_count_dict = {} + total_vol_dict[bundle_name] = lesions_load_dict['total_volume'] + avg_volume_dict[bundle_name] = lesions_load_dict['avg_volume'] + std_volume_dict[bundle_name] = lesions_load_dict['std_volume'] + lesions_count_dict[bundle_name] = lesions_load_dict['lesions_count'] + else: + total_vol_dict = {bundle_name: {}} + avg_volume_dict = {bundle_name: {}} + std_volume_dict = {bundle_name: {}} + lesions_count_dict = {bundle_name: {}} + for key in lesions_load_dict.keys(): + total_vol_dict[bundle_name][key] = \ + lesions_load_dict[key]['total_volume'] + avg_volume_dict[bundle_name][key] = \ + lesions_load_dict[key]['avg_volume'] + std_volume_dict[bundle_name][key] = \ + lesions_load_dict[key]['std_volume'] + lesions_count_dict[bundle_name][key] = \ + lesions_load_dict[key]['lesions_count'] + + _save_json_wrapper(args, 'sum_volume_per_label.json', total_vol_dict) + _save_json_wrapper(args, 'avg_volume_per_label.json', avg_volume_dict) + _save_json_wrapper(args, 'std_volume_per_label.json', std_volume_dict) + _save_json_wrapper(args, 'lesions_count_per_label.json', + lesions_count_dict) - 
lesions_dict[key]['streamlines_count'] = len(new_sft) - - with open(args.out_lesions_stats, 'w') as outfile: - json.dump(lesions_dict, outfile, - sort_keys=args.sort_keys, indent=args.indent) + if args.out_lesions_stats: + vol_dict = {bundle_name: {}} + streamlines_count_dict = {bundle_name: {}} + for key in lesions_dict.keys(): + vol_dict[bundle_name][key] = lesions_dict[key]['volume'] + if args.bundle: + streamlines_count_dict[bundle_name][key] = \ + lesions_dict[key]['strs_count'] + + _save_json_wrapper(args, 'volume_per_lesion.json', vol_dict) + if args.bundle: + _save_json_wrapper(args, 'streamlines_count_per_lesion.json', + streamlines_count_dict) if __name__ == "__main__": diff --git a/scripts/scil_compute_connectivity.py b/scripts/scil_compute_connectivity.py index e9dc3a291..0e36aa205 100755 --- a/scripts/scil_compute_connectivity.py +++ b/scripts/scil_compute_connectivity.py @@ -186,7 +186,7 @@ def _processing_wrapper(args): tmp_dict = compute_lesions_stats( density.astype(np.bool), lesions_atlas, voxel_sizes=voxel_sizes, single_label=True, - computed_lesions_labels=computed_lesions_labels) + precomputed_lesions_labels=computed_lesions_labels) tmp_ind = _streamlines_in_mask(list(streamlines), lesions_atlas.astype(np.uint8), np.eye(3), [0, 0, 0]) From 958dbb9e4e8788cf8c8e212c51123ed9aed10cdb Mon Sep 17 00:00:00 2001 From: frheault Date: Wed, 13 Jan 2021 14:35:02 -0600 Subject: [PATCH 04/15] Refactor the output --- scilpy/utils/metrics_tools.py | 2 +- scripts/scil_compute_connectivity.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/scilpy/utils/metrics_tools.py b/scilpy/utils/metrics_tools.py index e3564aed0..dbf6dad42 100644 --- a/scilpy/utils/metrics_tools.py +++ b/scilpy/utils/metrics_tools.py @@ -67,7 +67,7 @@ def compute_lesions_stats(map_data, lesions_atlas, single_label=True, if curr_vol >= min_lesions_vol: lesions_vols.append(curr_vol) if lesions_vols: - section_dict['total_volume'] = round(np.sum(curr_vol), 3) + section_dict['total_volume'] = round(np.sum(lesions_vols), 3) section_dict['avg_volume'] = round(np.average(lesions_vols), 3) section_dict['std_volume'] = round(np.std(lesions_vols), 3) section_dict['lesions_count'] = len(lesions_vols) diff --git a/scripts/scil_compute_connectivity.py b/scripts/scil_compute_connectivity.py index 0e36aa205..1915d068e 100755 --- a/scripts/scil_compute_connectivity.py +++ b/scripts/scil_compute_connectivity.py @@ -187,6 +187,7 @@ def _processing_wrapper(args): density.astype(np.bool), lesions_atlas, voxel_sizes=voxel_sizes, single_label=True, precomputed_lesions_labels=computed_lesions_labels) + tmp_ind = _streamlines_in_mask(list(streamlines), lesions_atlas.astype(np.uint8), np.eye(3), [0, 0, 0]) @@ -355,7 +356,7 @@ def main(): labels_list = np.loadtxt( args.force_labels_list, dtype=np.int16).tolist() - comb_list = list(itertools.combinations(labels_list, r=2)) + comb_list = list(itertools.combinations(labels_list, r=2))[0:1000] if not args.no_self_connection: comb_list.extend(zip(labels_list, labels_list)) From 4ac70966d7d04d5f9b8bc04aa8d0b4164bd0f838 Mon Sep 17 00:00:00 2001 From: frheault Date: Wed, 13 Jan 2021 14:53:13 -0600 Subject: [PATCH 05/15] Remove debug list skip --- scripts/scil_compute_connectivity.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/scil_compute_connectivity.py b/scripts/scil_compute_connectivity.py index 1915d068e..319439695 100755 --- a/scripts/scil_compute_connectivity.py +++ b/scripts/scil_compute_connectivity.py @@ -356,7 +356,7 @@ def main(): 
labels_list = np.loadtxt(
             args.force_labels_list, dtype=np.int16).tolist()

-    comb_list = list(itertools.combinations(labels_list, r=2))[0:1000]
+    comb_list = list(itertools.combinations(labels_list, r=2))
     if not args.no_self_connection:
         comb_list.extend(zip(labels_list, labels_list))

From 1dffdd72796351b5e3c9057b56b170810703b57d Mon Sep 17 00:00:00 2001
From: frheault
Date: Mon, 1 Mar 2021 08:33:56 -0600
Subject: [PATCH 06/15] Lesions to lesion

---
 scilpy/utils/metrics_tools.py        | 62 +++++++++---------
 scripts/scil_analyse_lesions_load.py | 96 ++++++++++++++--------------
 2 files changed, 79 insertions(+), 79 deletions(-)

diff --git a/scilpy/utils/metrics_tools.py b/scilpy/utils/metrics_tools.py
index dbf6dad42..edd79bcaf 100644
--- a/scilpy/utils/metrics_tools.py
+++ b/scilpy/utils/metrics_tools.py
@@ -11,35 +11,35 @@
 from scilpy.utils.filenames import split_name_with_nii


-def compute_lesions_stats(map_data, lesions_atlas, single_label=True,
-                          voxel_sizes=[1.0, 1.0, 1.0], min_lesions_vol=7,
-                          precomputed_lesions_labels=None):
+def compute_lesion_stats(map_data, lesion_atlas, single_label=True,
+                         voxel_sizes=[1.0, 1.0, 1.0], min_lesion_vol=7,
+                         precomputed_lesion_labels=None):
     """
-    Returns information related to lesions inside of a binary mask or voxel
+    Returns information related to lesion load inside of a binary mask or voxel
     labels map (bundle, for tractometry).

     Parameters
     ------------
     map_data : np.ndarray (3)
         Either a binary mask (uint8) or a voxel labels map (int16).
-    lesions_atlas : np.ndarray (3)
-        Labelled atlas of lesions. Should be int16.
+    lesion_atlas : np.ndarray (3)
+        Labelled lesion atlas. Should be int16.
     single_label : boolean
         If true, does not add an extra layer for number of labels.
     voxel_sizes : np.ndarray (3)
         If not specified, returns voxel count (instead of volume)
-    min_lesions_vol : float
-        Minimum lesions volume in mm3 (default: 7, cross-shape).
-    precomputed_lesions_labels : np.ndarray (N)
-        For connectivity analysis, when the unique lesions labels are known,
-        providing a pre-computed list of labels saves computation.
+    min_lesion_vol : float
+        Minimum lesion volume in mm3 (default: 7, cross-shape).
+    precomputed_lesion_labels : np.ndarray (N)
+        For connectivity analysis, when the unique lesion labels are known,
+        providing a pre-computed list of labels saves computation.
Returns
     ---------
-    lesions_load_dict : dict
-        For each label, volume and lesions count
+    lesion_load_dict : dict
+        For each label, volume and lesion count
     """
     voxel_vol = np.prod(voxel_sizes)
-    lesions_load_dict = {}
+    lesion_load_dict = {}

     if single_label:
         labels_list = [1]
@@ -51,38 +51,38 @@ def compute_lesions_stats(map_data, lesions_atlas, single_label=True,
         if not single_label:
             tmp_mask = np.zeros(map_data.shape, dtype=np.int16)
             tmp_mask[map_data == label] = 1
-            tmp_mask *= lesions_atlas
+            tmp_mask *= lesion_atlas
         else:
-            tmp_mask = lesions_atlas * map_data
+            tmp_mask = lesion_atlas * map_data

-        lesions_vols = []
-        if precomputed_lesions_labels is None:
-            computed_lesions_labels = np.unique(tmp_mask)[1:]
+        lesion_vols = []
+        if precomputed_lesion_labels is None:
+            computed_lesion_labels = np.unique(tmp_mask)[1:]
         else:
-            computed_lesions_labels = precomputed_lesions_labels
+            computed_lesion_labels = precomputed_lesion_labels

-        for lesion in computed_lesions_labels:
+        for lesion in computed_lesion_labels:
             curr_vol = np.count_nonzero(tmp_mask[tmp_mask == lesion]) \
                 * voxel_vol
-            if curr_vol >= min_lesions_vol:
-                lesions_vols.append(curr_vol)
-        if lesions_vols:
-            section_dict['total_volume'] = round(np.sum(lesions_vols), 3)
-            section_dict['avg_volume'] = round(np.average(lesions_vols), 3)
-            section_dict['std_volume'] = round(np.std(lesions_vols), 3)
-            section_dict['lesions_count'] = len(lesions_vols)
+            if curr_vol >= min_lesion_vol:
+                lesion_vols.append(curr_vol)
+        if lesion_vols:
+            section_dict['total_volume'] = round(np.sum(lesion_vols), 3)
+            section_dict['avg_volume'] = round(np.average(lesion_vols), 3)
+            section_dict['std_volume'] = round(np.std(lesion_vols), 3)
+            section_dict['lesion_count'] = len(lesion_vols)
         else:
             section_dict['total_volume'] = 0
             section_dict['avg_volume'] = 0
             section_dict['std_volume'] = 0
-            section_dict['lesions_count'] = 0
+            section_dict['lesion_count'] = 0

         if single_label:
-            lesions_load_dict = section_dict
+            lesion_load_dict = section_dict
         else:
-            lesions_load_dict[str(label).zfill(3)] = section_dict
+            lesion_load_dict[str(label).zfill(3)] = section_dict

-    return lesions_load_dict
+    return lesion_load_dict


 def get_bundle_metrics_profiles(sft, metrics_files):

diff --git a/scripts/scil_analyse_lesions_load.py b/scripts/scil_analyse_lesions_load.py
index 0e4958c58..f6c866255 100644
--- a/scripts/scil_analyse_lesions_load.py
+++ b/scripts/scil_analyse_lesions_load.py
@@ -2,7 +2,7 @@
 # -*- coding: utf-8 -*-

 """
-This script will output information about lesions load of a bundle.
+This script will output information about lesion load in bundle(s).
 Either using streamlines, a binary map or a bundle voxel labels map.
""" @@ -25,17 +25,17 @@ from scilpy.segment.streamlines import filter_grid_roi from scilpy.tractanalysis.streamlines_metrics import compute_tract_counts_map from scilpy.utils.filenames import split_name_with_nii -from scilpy.utils.metrics_tools import compute_lesions_stats +from scilpy.utils.metrics_tools import compute_lesion_stats def _build_arg_parser(): p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) - p.add_argument('in_lesions', - help='Binary mask of the lesions (.nii.gz).') + p.add_argument('in_lesion', + help='Binary mask of the lesion (.nii.gz).') p.add_argument('out_dir', - help='Output directory for lesions information') + help='Output directory for lesion information') p1 = p.add_mutually_exclusive_group() p1.add_argument('--bundle', help='Path of the bundle file (.trk).') @@ -44,12 +44,12 @@ def _build_arg_parser(): p1.add_argument('--bundle_labels_map', help='Path of the bundle labels_map.') - p.add_argument('--min_lesions_vol', type=float, default=7, - help='Minimum lesions volume in mm3 [%(default)s].') + p.add_argument('--min_lesion_vol', type=float, default=7, + help='Minimum lesion volume in mm3 [%(default)s].') p.add_argument('--out_atlas', - help='Save the lesions as an atlas.') - p.add_argument('--out_lesions_stats', action='store_true', - help='Save the lesions-wise & streamlines count.') + help='Save the lesion as an atlas.') + p.add_argument('--out_lesion_stats', action='store_true', + help='Save the lesion-wise & streamlines count.') add_json_args(p) add_overwrite_arg(p) @@ -73,13 +73,13 @@ def main(): and (not args.bundle_labels_map): parser.error('One of the option --bundle or --map must be used') - assert_inputs_exist(parser, [args.in_lesions], + assert_inputs_exist(parser, [args.in_lesion], optional=[args.bundle, args.bundle_mask, args.bundle_labels_map]) assert_output_dirs_exist_and_empty(parser, args, args.out_dir) - lesions_img = nib.load(args.in_lesions) - lesions_data = get_data_as_mask(lesions_img) + lesion_img = nib.load(args.in_lesion) + lesion_data = get_data_as_mask(lesion_img) if args.bundle: bundle_name, _ = split_name_with_nii(os.path.basename(args.bundle)) @@ -88,7 +88,7 @@ def main(): sft.to_corner() streamlines = sft.get_streamlines_copy() map_data = compute_tract_counts_map(streamlines, - lesions_data.shape) + lesion_data.shape) map_data[map_data > 0] = 1 elif args.bundle_mask: bundle_name, _ = split_name_with_nii( @@ -102,70 +102,70 @@ def main(): map_data = get_data_as_label(map_img) is_single_label = args.bundle_labels_map is None - voxel_sizes = lesions_img.header.get_zooms()[0:3] - lesions_atlas, _ = ndi.label(lesions_data) + voxel_sizes = lesion_img.header.get_zooms()[0:3] + lesion_atlas, _ = ndi.label(lesion_data) - lesions_load_dict = compute_lesions_stats( - map_data, lesions_atlas, single_label=is_single_label, - voxel_sizes=voxel_sizes, min_lesions_vol=args.min_lesions_vol) + lesion_load_dict = compute_lesion_stats( + map_data, lesion_atlas, single_label=is_single_label, + voxel_sizes=voxel_sizes, min_lesion_vol=args.min_lesion_vol) if args.out_atlas: - lesions_atlas *= map_data.astype(np.bool) - nib.save(nib.Nifti1Image(lesions_atlas, lesions_img.affine), + lesion_atlas *= map_data.astype(np.bool) + nib.save(nib.Nifti1Image(lesion_atlas, lesion_img.affine), args.out_atlas) - if args.out_lesions_stats: - lesions_dict = {} - for lesion in np.unique(lesions_atlas)[1:]: - curr_vol = np.count_nonzero(lesions_atlas[lesions_atlas == lesion]) \ + if args.out_lesion_stats: + lesion_dict = {} 
+ for lesion in np.unique(lesion_atlas)[1:]: + curr_vol = np.count_nonzero(lesion_atlas[lesion_atlas == lesion]) \ * np.prod(voxel_sizes) - if curr_vol >= args.min_lesions_vol: + if curr_vol >= args.min_lesion_vol: key = str(lesion).zfill(3) - lesions_dict[key] = {'volume': curr_vol} + lesion_dict[key] = {'volume': curr_vol} if args.bundle: - tmp = np.zeros(lesions_atlas.shape) - tmp[lesions_atlas == lesion] = 1 + tmp = np.zeros(lesion_atlas.shape) + tmp[lesion_atlas == lesion] = 1 new_sft, _ = filter_grid_roi(sft, tmp, 'any', False) - lesions_dict[key]['strs_count'] = len(new_sft) + lesion_dict[key]['strs_count'] = len(new_sft) if is_single_label: total_vol_dict = {} avg_volume_dict = {} std_volume_dict = {} - lesions_count_dict = {} - total_vol_dict[bundle_name] = lesions_load_dict['total_volume'] - avg_volume_dict[bundle_name] = lesions_load_dict['avg_volume'] - std_volume_dict[bundle_name] = lesions_load_dict['std_volume'] - lesions_count_dict[bundle_name] = lesions_load_dict['lesions_count'] + lesion_count_dict = {} + total_vol_dict[bundle_name] = lesion_load_dict['total_volume'] + avg_volume_dict[bundle_name] = lesion_load_dict['avg_volume'] + std_volume_dict[bundle_name] = lesion_load_dict['std_volume'] + lesion_count_dict[bundle_name] = lesion_load_dict['lesion_count'] else: total_vol_dict = {bundle_name: {}} avg_volume_dict = {bundle_name: {}} std_volume_dict = {bundle_name: {}} - lesions_count_dict = {bundle_name: {}} - for key in lesions_load_dict.keys(): + lesion_count_dict = {bundle_name: {}} + for key in lesion_load_dict.keys(): total_vol_dict[bundle_name][key] = \ - lesions_load_dict[key]['total_volume'] + lesion_load_dict[key]['total_volume'] avg_volume_dict[bundle_name][key] = \ - lesions_load_dict[key]['avg_volume'] + lesion_load_dict[key]['avg_volume'] std_volume_dict[bundle_name][key] = \ - lesions_load_dict[key]['std_volume'] - lesions_count_dict[bundle_name][key] = \ - lesions_load_dict[key]['lesions_count'] + lesion_load_dict[key]['std_volume'] + lesion_count_dict[bundle_name][key] = \ + lesion_load_dict[key]['lesion_count'] _save_json_wrapper(args, 'sum_volume_per_label.json', total_vol_dict) _save_json_wrapper(args, 'avg_volume_per_label.json', avg_volume_dict) _save_json_wrapper(args, 'std_volume_per_label.json', std_volume_dict) - _save_json_wrapper(args, 'lesions_count_per_label.json', - lesions_count_dict) + _save_json_wrapper(args, 'lesion_count_per_label.json', + lesion_count_dict) - if args.out_lesions_stats: + if args.out_lesion_stats: vol_dict = {bundle_name: {}} streamlines_count_dict = {bundle_name: {}} - for key in lesions_dict.keys(): - vol_dict[bundle_name][key] = lesions_dict[key]['volume'] + for key in lesion_dict.keys(): + vol_dict[bundle_name][key] = lesion_dict[key]['volume'] if args.bundle: streamlines_count_dict[bundle_name][key] = \ - lesions_dict[key]['strs_count'] + lesion_dict[key]['strs_count'] _save_json_wrapper(args, 'volume_per_lesion.json', vol_dict) if args.bundle: From 69668cf8187743626184765ae5f9e757a0a6b810 Mon Sep 17 00:00:00 2001 From: frheault Date: Tue, 2 Mar 2021 10:48:54 -0600 Subject: [PATCH 07/15] Major sync with flow --- scilpy/utils/metrics_tools.py | 42 +++++++------ scripts/scil_analyse_lesions_load.py | 87 ++++++++++--------------- scripts/scil_compute_connectivity.py | 94 ++++++++++++++-------------- scripts/scil_convert_json_to_xlsx.py | 83 +++++++++++++++++++++++- scripts/scil_merge_json.py | 21 +++++++ 5 files changed, 204 insertions(+), 123 deletions(-) diff --git a/scilpy/utils/metrics_tools.py 
b/scilpy/utils/metrics_tools.py
index 3b105085f..358c9c238 100644
--- a/scilpy/utils/metrics_tools.py
+++ b/scilpy/utils/metrics_tools.py
@@ -39,15 +39,16 @@ def compute_lesion_stats(map_data, lesion_atlas, single_label=True,
         For each label, volume and lesion count
     """
     voxel_vol = np.prod(voxel_sizes)
-    lesion_load_dict = {}

     if single_label:
         labels_list = [1]
     else:
-        labels_list = np.unique(map_data)[1:]
+        labels_list = np.unique(map_data)[1:].astype(np.int32)

+    section_dict = {'lesion_total_volume': {}, 'lesion_volume': {},
+                    'lesion_count': {}}
     for label in labels_list:
-        section_dict = {}
+        zlabel = str(label).zfill(3)
         if not single_label:
             tmp_mask = np.zeros(map_data.shape, dtype=np.int16)
             tmp_mask[map_data == label] = 1
@@ -67,22 +68,21 @@
             if curr_vol >= min_lesion_vol:
                 lesion_vols.append(curr_vol)
         if lesion_vols:
-            section_dict['total_volume'] = round(np.sum(lesion_vols), 3)
-            section_dict['avg_volume'] = round(np.average(lesion_vols), 3)
-            section_dict['std_volume'] = round(np.std(lesion_vols), 3)
-            section_dict['lesion_count'] = len(lesion_vols)
+            section_dict['lesion_total_volume'][zlabel] = round(
+                np.sum(lesion_vols), 3)
+            section_dict['lesion_volume'][zlabel] = np.round(lesion_vols, 3).tolist()
+            section_dict['lesion_count'][zlabel] = float(len(lesion_vols))
         else:
-            section_dict['total_volume'] = 0
-            section_dict['avg_volume'] = 0
-            section_dict['std_volume'] = 0
-            section_dict['lesion_count'] = 0
+            section_dict['lesion_total_volume'][zlabel] = 0.0
+            section_dict['lesion_volume'][zlabel] = [0.0]
+            section_dict['lesion_count'][zlabel] = 0.0

-        if single_label:
-            lesion_load_dict = section_dict
-        else:
-            lesion_load_dict[str(label).zfill(3)] = section_dict
+    if single_label:
+        section_dict = {'lesion_total_volume': section_dict['lesion_total_volume']['001'],
+                        'lesion_volume': section_dict['lesion_volume']['001'],
+                        'lesion_count': section_dict['lesion_count']['001']}

-    return lesion_load_dict
+    return section_dict


 def get_bundle_metrics_profiles(sft, metrics_files):
@@ -328,13 +328,13 @@ def plot_metrics_stats(means, stds, title=None, xlabel=None,
     if figlabel is not None:
         fig.set_label(figlabel)

-    if means.ndim > 1:
+    if means.shape[-1] > 1:
         mean = np.average(means, axis=1)
         std = np.std(means, axis=1)
         alpha = 0.5
     else:
-        mean = np.array(means)
-        std = np.array(stds)
+        mean = np.array(means).ravel()
+        std = np.array(stds).ravel()
         alpha = 0.9

     dim = np.arange(1, len(mean)+1, 1)
@@ -353,6 +353,10 @@
     ax.plot(dim, mean, color="k", linewidth=5, solid_capstyle='round')

     # Plot the std
+    # print(mean)
+    # print(mean - std)
+    # print(mean + std)
+    # print()
     plt.fill_between(dim, mean - std, mean + std,
                      facecolor=fill_color, alpha=alpha)

diff --git a/scripts/scil_analyse_lesions_load.py b/scripts/scil_analyse_lesions_load.py
index f6c866255..6cab4e1c3 100644
--- a/scripts/scil_analyse_lesions_load.py
+++ b/scripts/scil_analyse_lesions_load.py
@@ -3,7 +3,8 @@

 """
 This script will output information about lesion load in bundle(s).
-Either using streamlines, a binary map or a bundle voxel labels map.
+The input can either be streamlines, a binary bundle map, or a bundle voxel
+label map.
""" import argparse @@ -20,7 +21,7 @@ from scilpy.io.utils import (add_overwrite_arg, assert_inputs_exist, add_json_args, - assert_output_dirs_exist_and_empty, + assert_outputs_exist, add_reference_arg) from scilpy.segment.streamlines import filter_grid_roi from scilpy.tractanalysis.streamlines_metrics import compute_tract_counts_map @@ -34,8 +35,8 @@ def _build_arg_parser(): p.add_argument('in_lesion', help='Binary mask of the lesion (.nii.gz).') - p.add_argument('out_dir', - help='Output directory for lesion information') + p.add_argument('out_json', + help='Output file (.json) for lesion information') p1 = p.add_mutually_exclusive_group() p1.add_argument('--bundle', help='Path of the bundle file (.trk).') @@ -46,10 +47,12 @@ def _build_arg_parser(): p.add_argument('--min_lesion_vol', type=float, default=7, help='Minimum lesion volume in mm3 [%(default)s].') - p.add_argument('--out_atlas', + p.add_argument('--out_lesion_atlas', metavar='FILE', help='Save the lesion as an atlas.') - p.add_argument('--out_lesion_stats', action='store_true', - help='Save the lesion-wise & streamlines count.') + p.add_argument('--out_lesion_stats', metavar='FILE', + help='Save the lesion-wise.') + p.add_argument('--out_streamlines_stats', metavar='FILE', + help='Save the lesion-wise streamlines count.') add_json_args(p) add_overwrite_arg(p) @@ -58,13 +61,6 @@ def _build_arg_parser(): return p -def _save_json_wrapper(args, out_name, out_dict): - filename = os.path.join(args.out_dir, out_name) - with open(filename, 'w') as outfile: - json.dump(out_dict, outfile, - sort_keys=args.sort_keys, indent=args.indent) - - def main(): parser = _build_arg_parser() args = parser.parse_args() @@ -76,10 +72,12 @@ def main(): assert_inputs_exist(parser, [args.in_lesion], optional=[args.bundle, args.bundle_mask, args.bundle_labels_map]) - assert_output_dirs_exist_and_empty(parser, args, args.out_dir) + assert_outputs_exist(parser, args, args.out_json, + optional=[args.out_lesion_stats, + args.out_streamlines_stats]) lesion_img = nib.load(args.in_lesion) - lesion_data = get_data_as_mask(lesion_img) + lesion_data = get_data_as_mask(lesion_img, dtype=np.bool) if args.bundle: bundle_name, _ = split_name_with_nii(os.path.basename(args.bundle)) @@ -109,10 +107,10 @@ def main(): map_data, lesion_atlas, single_label=is_single_label, voxel_sizes=voxel_sizes, min_lesion_vol=args.min_lesion_vol) - if args.out_atlas: - lesion_atlas *= map_data.astype(np.bool) + if args.out_lesion_atlas: + # lesion_atlas *= map_data.astype(np.bool) nib.save(nib.Nifti1Image(lesion_atlas, lesion_img.affine), - args.out_atlas) + args.out_lesion_atlas) if args.out_lesion_stats: lesion_dict = {} @@ -128,49 +126,28 @@ def main(): new_sft, _ = filter_grid_roi(sft, tmp, 'any', False) lesion_dict[key]['strs_count'] = len(new_sft) - if is_single_label: - total_vol_dict = {} - avg_volume_dict = {} - std_volume_dict = {} - lesion_count_dict = {} - total_vol_dict[bundle_name] = lesion_load_dict['total_volume'] - avg_volume_dict[bundle_name] = lesion_load_dict['avg_volume'] - std_volume_dict[bundle_name] = lesion_load_dict['std_volume'] - lesion_count_dict[bundle_name] = lesion_load_dict['lesion_count'] - else: - total_vol_dict = {bundle_name: {}} - avg_volume_dict = {bundle_name: {}} - std_volume_dict = {bundle_name: {}} - lesion_count_dict = {bundle_name: {}} - for key in lesion_load_dict.keys(): - total_vol_dict[bundle_name][key] = \ - lesion_load_dict[key]['total_volume'] - avg_volume_dict[bundle_name][key] = \ - lesion_load_dict[key]['avg_volume'] - 
std_volume_dict[bundle_name][key] = \
-                    lesion_load_dict[key]['std_volume']
-                lesion_count_dict[bundle_name][key] = \
-                    lesion_load_dict[key]['lesion_count']
-
-        _save_json_wrapper(args, 'sum_volume_per_label.json', total_vol_dict)
-        _save_json_wrapper(args, 'avg_volume_per_label.json', avg_volume_dict)
-        _save_json_wrapper(args, 'std_volume_per_label.json', std_volume_dict)
-        _save_json_wrapper(args, 'lesion_count_per_label.json',
-                           lesion_count_dict)
+    volume_dict = {bundle_name: lesion_load_dict}
+    with open(args.out_json, 'w') as outfile:
+        json.dump(volume_dict, outfile,
+                  sort_keys=args.sort_keys, indent=args.indent)

-    if args.out_lesion_stats:
+    if args.out_streamlines_stats or args.out_lesion_stats:
         vol_dict = {bundle_name: {}}
-        streamlines_count_dict = {bundle_name: {}}
+        streamlines_count_dict = {bundle_name: {'streamlines_count': {}}}
         for key in lesion_dict.keys():
             vol_dict[bundle_name][key] = lesion_dict[key]['volume']
             if args.bundle:
-                streamlines_count_dict[bundle_name][key] = \
+                streamlines_count_dict[bundle_name]['streamlines_count'][key] = \
                     lesion_dict[key]['strs_count']

-        _save_json_wrapper(args, 'volume_per_lesion.json', vol_dict)
-        if args.bundle:
-            _save_json_wrapper(args, 'streamlines_count_per_lesion.json',
-                               streamlines_count_dict)
+        if args.out_lesion_stats:
+            with open(args.out_lesion_stats, 'w') as outfile:
+                json.dump(vol_dict, outfile,
+                          sort_keys=args.sort_keys, indent=args.indent)
+        if args.out_streamlines_stats:
+            with open(args.out_streamlines_stats, 'w') as outfile:
+                json.dump(streamlines_count_dict, outfile,
+                          sort_keys=args.sort_keys, indent=args.indent)


 if __name__ == "__main__":

diff --git a/scripts/scil_compute_connectivity.py b/scripts/scil_compute_connectivity.py
index 95242aebc..d394363ed 100755
--- a/scripts/scil_compute_connectivity.py
+++ b/scripts/scil_compute_connectivity.py
@@ -29,12 +29,12 @@
 as the input directory. Each will generate a matrix. The average non-zero
 value in the map will be reported in the matrices nodes.

-The parameter --lesions_load will compute 3 lesions-related matrices:
-lesions_count.npy, lesions_vol.npy, lesions_sc.npy and put them inside of a
+The parameter --lesion_load will compute 3 lesion-related matrices:
+lesion_count.npy, lesion_vol.npy, lesion_sc.npy and put them inside of a
 specified folder. They represent the number of lesions, the total volume of
 lesions and the total number of streamlines going through the lesions for each
 connection. Each connection can be seen as a 'bundle' and then something
 similar to scil_analyse_lesions_load.py is run for each 'bundle'.
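Once written, the three matrices behave like any other connectivity matrix produced by this script. A sketch of loading them back, where 'out_dir' stands in for the hypothetical output directory passed to --lesion_load:

    import os
    import numpy as np

    out_dir = 'out_dir'
    lesion_vol = np.load(os.path.join(out_dir, 'lesion_vol.npy'))
    lesion_count = np.load(os.path.join(out_dir, 'lesion_count.npy'))
    lesion_sc = np.load(os.path.join(out_dir, 'lesion_sc.npy'))

    # Each is a symmetric (nb_labels x nb_labels) array where entry (i, j)
    # holds the lesion volume / lesion count / streamlines count of the
    # connection between labels i and j.
    print(lesion_vol.shape, lesion_count.shape, lesion_sc.shape)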
""" import argparse @@ -61,7 +61,7 @@ validate_nbr_processes) from scilpy.tractanalysis.reproducibility_measures import compute_bundle_adjacency_voxel from scilpy.tractanalysis.streamlines_metrics import compute_tract_counts_map -from scilpy.utils.metrics_tools import compute_lesions_stats +from scilpy.utils.metrics_tools import compute_lesion_stats def load_node_nifti(directory, in_label, out_label, ref_img): @@ -170,41 +170,41 @@ def _processing_wrapper(args): voxels_value = metric_data[density > 0] measures_to_return[metric_filename] = np.average(voxels_value) - # Lesions + # lesion else: - lesions_filename = measure[0][0] - computed_lesions_labels = measure[0][1] - lesions_img = measure[1] - if not is_header_compatible(lesions_img, labels_img): + lesion_filename = measure[0][0] + computed_lesion_labels = measure[0][1] + lesion_img = measure[1] + if not is_header_compatible(lesion_img, labels_img): logging.error('{} do not have a compatible header'.format( - lesions_filename)) + lesion_filename)) raise IOError - voxel_sizes = lesions_img.header.get_zooms()[0:3] - lesions_img.set_filename('tmp.nii.gz') - lesions_atlas = get_data_as_label(lesions_img) - tmp_dict = compute_lesions_stats( - density.astype(np.bool), lesions_atlas, + voxel_sizes = lesion_img.header.get_zooms()[0:3] + lesion_img.set_filename('tmp.nii.gz') + lesion_atlas = get_data_as_label(lesion_img) + tmp_dict = compute_lesion_stats( + density.astype(np.bool), lesion_atlas, voxel_sizes=voxel_sizes, single_label=True, - precomputed_lesions_labels=computed_lesions_labels) + precomputed_lesion_labels=computed_lesion_labels) tmp_ind = _streamlines_in_mask(list(streamlines), - lesions_atlas.astype(np.uint8), + lesion_atlas.astype(np.uint8), np.eye(3), [0, 0, 0]) streamlines_count = len( np.where(tmp_ind == [0, 1][True])[0].tolist()) if tmp_dict: - measures_to_return[lesions_filename+'vol'] = \ + measures_to_return[lesion_filename+'vol'] = \ tmp_dict['total_volume'] - measures_to_return[lesions_filename+'count'] = \ - tmp_dict['lesions_count'] - measures_to_return[lesions_filename+'sc'] = \ + measures_to_return[lesion_filename+'count'] = \ + tmp_dict['lesion_count'] + measures_to_return[lesion_filename+'sc'] = \ streamlines_count else: - measures_to_return[lesions_filename+'vol'] = 0 - measures_to_return[lesions_filename+'count'] = 0 - measures_to_return[lesions_filename+'sc'] = 0 + measures_to_return[lesion_filename+'vol'] = 0 + measures_to_return[lesion_filename+'count'] = 0 + measures_to_return[lesion_filename+'sc'] = 0 if include_dps: for dps_key in hdf5_file[key].keys(): @@ -250,9 +250,9 @@ def _build_arg_parser(): metavar=('IN_FILE', 'OUT_FILE'), help='Input (.nii.gz). 
@@ -250,9 +250,9 @@ def _build_arg_parser():
                    metavar=('IN_FILE', 'OUT_FILE'),
                    help='Input (.nii.gz). and output file (.npy) for a metric '
                         'weighted matrix.')
-    p.add_argument('--lesions_load', nargs=2, metavar=('IN_FILE', 'OUT_DIR'),
+    p.add_argument('--lesion_load', nargs=2, metavar=('IN_FILE', 'OUT_DIR'),
                    help='Input binary mask (.nii.gz) and output directory '
-                        'for all lesions related matrices.')
+                        'for all lesion related matrices.')

     p.add_argument('--density_weighting', action="store_true",
                    help='Use density-weighting for the metric weighted matrix.')
@@ -321,23 +321,23 @@ def main():
             dict_metrics_out_name[in_name] = out_name
             measures_output_filename.append(out_name)

-    dict_lesions_out_name = {}
-    if args.lesions_load is not None:
-        in_name = args.lesions_load[0]
-        lesions_img = nib.load(in_name)
-        lesions_data = get_data_as_mask(lesions_img)
-        lesions_atlas, _ = ndi.label(lesions_data)
-        measures_to_compute.append(((in_name, np.unique(lesions_atlas)[1:]),
-                                    nib.Nifti1Image(lesions_atlas,
-                                                    lesions_img.affine)))
-
-        out_name_1 = os.path.join(args.lesions_load[1], 'lesions_vol.npy')
-        out_name_2 = os.path.join(args.lesions_load[1], 'lesions_count.npy')
-        out_name_3 = os.path.join(args.lesions_load[1], 'lesions_sc.npy')
-
-        dict_lesions_out_name[in_name+'vol'] = out_name_1
-        dict_lesions_out_name[in_name+'count'] = out_name_2
-        dict_lesions_out_name[in_name+'sc'] = out_name_3
+    dict_lesion_out_name = {}
+    if args.lesion_load is not None:
+        in_name = args.lesion_load[0]
+        lesion_img = nib.load(in_name)
+        lesion_data = get_data_as_mask(lesion_img)
+        lesion_atlas, _ = ndi.label(lesion_data)
+        measures_to_compute.append(((in_name, np.unique(lesion_atlas)[1:]),
+                                    nib.Nifti1Image(lesion_atlas,
+                                                    lesion_img.affine)))
+
+        out_name_1 = os.path.join(args.lesion_load[1], 'lesion_vol.npy')
+        out_name_2 = os.path.join(args.lesion_load[1], 'lesion_count.npy')
+        out_name_3 = os.path.join(args.lesion_load[1], 'lesion_sc.npy')
+
+        dict_lesion_out_name[in_name+'vol'] = out_name_1
+        dict_lesion_out_name[in_name+'count'] = out_name_2
+        dict_lesion_out_name[in_name+'sc'] = out_name_3

         measures_output_filename.extend([out_name_1, out_name_2, out_name_3])

     assert_outputs_exist(parser, args, measures_output_filename)
@@ -437,8 +437,8 @@ def main():
             matrix_basename = dict_metrics_out_name[measure]
         elif measure in dict_maps_out_name:
             matrix_basename = dict_maps_out_name[measure]
-        elif measure in dict_lesions_out_name:
-            matrix_basename = dict_lesions_out_name[measure]
+        elif measure in dict_lesion_out_name:
+            matrix_basename = dict_lesion_out_name[measure]
         else:
             matrix_basename = measure

diff --git a/scripts/scil_convert_json_to_xlsx.py b/scripts/scil_convert_json_to_xlsx.py
index cc593f796..2c48e04ef 100755
--- a/scripts/scil_convert_json_to_xlsx.py
+++ b/scripts/scil_convert_json_to_xlsx.py
@@ -38,7 +38,6 @@ def _get_metrics_names(stats):
     for bundles in iter(stats.values()):
         for val in iter(bundles.values()):
             mnames |= set(val.keys())
-
     return mnames


@@ -66,11 +65,17 @@ def _find_stat_name(stats):

 def _get_stats_parse_function(stats, stats_over_population):
     first_sub_stats = stats[list(stats.keys())[0]]
     first_bundle_stats = first_sub_stats[list(first_sub_stats.keys())[0]]
-    first_bundle_substat = first_bundle_stats[list(first_bundle_stats.keys())[0]]
+    first_bundle_substat = first_bundle_stats[list(
+        first_bundle_stats.keys())[0]]

     if len(first_bundle_stats.keys()) == 1 and\
             _are_all_elements_scalars(first_bundle_stats):
         return _parse_scalar_stats
+    elif len(first_bundle_stats.keys()) == 4 and \
+        set(first_bundle_stats.keys()) == \
+            set(['lesion_total_vol', 'lesion_avg_vol', 'lesion_std_vol',
+                 'lesion_count']):
+        return _parse_lesion
     elif len(first_bundle_stats.keys()) == 4 and \
         set(first_bundle_stats.keys()) == \
             set(['min_length', 'max_length', 'mean_length', 'std_length']):
@@ -156,6 +161,42 @@ def _parse_scalar_meanstd(stats, subs, bundles):
     return dataframes, df_names


+def _parse_scalar_lesions(stats, subs, bundles):
+    metric_names = _get_metrics_names(stats)
+    nb_subs = len(subs)
+    nb_bundles = len(bundles)
+    nb_metrics = len(metric_names)
+
+    means = np.full((nb_subs, nb_bundles, nb_metrics), np.NaN)
+    stddev = np.full((nb_subs, nb_bundles, nb_metrics), np.NaN)
+
+    for sub_id, sub_name in enumerate(subs):
+        for bundle_id, bundle_name in enumerate(bundles):
+            for metric_id, metric_name in enumerate(metric_names):
+                b_stat = stats[sub_name].get(bundle_name)
+
+                if b_stat is not None:
+                    m_stat = b_stat.get(metric_name)
+
+                    if m_stat is not None:
+                        means[sub_id, bundle_id, metric_id] = m_stat['mean']
+                        stddev[sub_id, bundle_id, metric_id] = m_stat['std']
+
+    dataframes = []
+    df_names = []
+
+    for metric_id, metric_name in enumerate(metric_names):
+        dataframes.append(pd.DataFrame(data=means[:, :, metric_id],
+                                       index=subs, columns=bundles))
+        df_names.append(metric_name + "_mean")
+
+        dataframes.append(pd.DataFrame(data=stddev[:, :, metric_id],
+                                       index=subs, columns=bundles))
+        df_names.append(metric_name + "_std")
+
+    return dataframes, df_names
+
+
 def _parse_lengths(stats, subs, bundles):
     nb_subs = len(subs)
     nb_bundles = len(bundles)
@@ -193,6 +234,44 @@ def _parse_lengths(stats, subs, bundles):
     return dataframes, df_names


+def _parse_lesion(stats, subs, bundles):
+    nb_subs = len(subs)
+    nb_bundles = len(bundles)
+
+    total_volume = np.full((nb_subs, nb_bundles), np.NaN)
+    avg_volume = np.full((nb_subs, nb_bundles), np.NaN)
+    std_volume = np.full((nb_subs, nb_bundles), np.NaN)
+    lesion_count = np.full((nb_subs, nb_bundles), np.NaN)
+
+    for sub_id, sub_name in enumerate(subs):
+        for bundle_id, bundle_name in enumerate(bundles):
+            b_stat = stats[sub_name].get(bundle_name)
+
+            if b_stat is not None:
+                total_volume[sub_id, bundle_id] = b_stat['lesion_total_vol']
+                avg_volume[sub_id, bundle_id] = b_stat['lesion_avg_vol']
+                std_volume[sub_id, bundle_id] = b_stat['lesion_std_vol']
+                lesion_count[sub_id, bundle_id] = b_stat['lesion_count']
+
+    dataframes = [pd.DataFrame(data=total_volume,
+                               index=subs,
+                               columns=bundles),
+                  pd.DataFrame(data=avg_volume,
+                               index=subs,
+                               columns=bundles),
+                  pd.DataFrame(data=std_volume,
+                               index=subs,
+                               columns=bundles),
+                  pd.DataFrame(data=lesion_count,
+                               index=subs,
+                               columns=bundles)]
+
+    df_names = ["lesion_total_vol", "lesion_avg_vol",
+                "lesion_std_vol", "lesion_count"]
+
+    return dataframes, df_names
+
+
 def _parse_per_label_scalar(stats, subs, bundles):
     labels = _get_labels(stats)
     labels.sort()
diff --git a/scripts/scil_merge_json.py b/scripts/scil_merge_json.py
index 68a51e25f..b287d5e21 100755
--- a/scripts/scil_merge_json.py
+++ b/scripts/scil_merge_json.py
@@ -10,6 +10,8 @@ import json
 import os

+import numpy as np
+
 from scilpy.io.utils import (add_overwrite_arg, add_json_args,
                              assert_inputs_exist,
                              assert_outputs_exist)
@@ -42,6 +44,20 @@ def _merge_dict(dict_1, dict_2, no_list=False, recursive=False):
     return new_dict


+def _average_dict(dict_1):
+    for key in dict_1.keys():
+        if isinstance(dict_1[key], dict):
+            dict_1[key] = _average_dict(dict_1[key])
+        elif isinstance(dict_1[key], list) or np.isscalar(dict_1[key]):
+            new_dict = {}
+            for subkey in dict_1.keys():
+                new_dict[subkey] = {'mean': np.average(dict_1[subkey]),
+                                    'std': np.std(dict_1[subkey])}
+            return new_dict
+
+    return dict_1
+
+
 def _build_arg_parser():
     p = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
                                 description=__doc__)
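The --average_last_layer flag added in the next hunk is a thin wrapper around
_average_dict, so a concrete illustration of that helper may help. This is
only a sketch of the intended behaviour, run on an invented two-subject
measurement list; the function body mirrors the hunk above so it runs
standalone:

    import numpy as np

    def _average_dict(dict_1):
        # Same logic as the helper added in this patch: recurse into dicts,
        # and collapse the first layer made of lists/scalars into mean/std.
        for key in dict_1.keys():
            if isinstance(dict_1[key], dict):
                dict_1[key] = _average_dict(dict_1[key])
            elif isinstance(dict_1[key], list) or np.isscalar(dict_1[key]):
                new_dict = {}
                for subkey in dict_1.keys():
                    new_dict[subkey] = {'mean': np.average(dict_1[subkey]),
                                        'std': np.std(dict_1[subkey])}
                return new_dict
        return dict_1

    merged = {'AF_L': {'lesion_total_vol': [84.0, 112.0]}}
    result = _average_dict(merged)
    # AF_L/lesion_total_vol collapses to {'mean': 98.0, 'std': 14.0}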
@@ -61,6 +77,8 @@ def _build_arg_parser():
                    help='Merge ignoring parent key (e.g for population).')
     p.add_argument('--recursive', action='store_true',
                    help='Merge all entries at the lowest layers.')
+    p.add_argument('--average_last_layer', action='store_true',
+                   help='Average all entries at the lowest layers.')

     add_json_args(p)
     add_overwrite_arg(p)
@@ -87,6 +105,9 @@ def main():
                                    no_list=args.no_list,
                                    recursive=args.recursive)

+    if args.average_last_layer:
+        out_dict = _average_dict(out_dict)
+
     with open(args.out_json, 'w') as outfile:
         if args.add_parent_key:
             out_dict = {args.add_parent_key: out_dict}

From 5b6e385fc8e606d70450553a08864d4666fa8c27 Mon Sep 17 00:00:00 2001
From: frheault
Date: Tue, 2 Mar 2021 12:12:42 -0600
Subject: [PATCH 08/15] adapted for connectomics

---
 requirements.txt                     | 2 +-
 scripts/scil_compute_connectivity.py | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index bf073508f..78bfaab86 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -32,4 +32,4 @@ bctpy==0.5.*
 statsmodels==0.11.*
 dmri-commit==1.4.*
 openpyxl==2.6.*
-cvxpy==1.0.*
+cvxpy==1.1.*
diff --git a/scripts/scil_compute_connectivity.py b/scripts/scil_compute_connectivity.py
index d394363ed..afec45b55 100755
--- a/scripts/scil_compute_connectivity.py
+++ b/scripts/scil_compute_connectivity.py
@@ -196,7 +196,7 @@ def _processing_wrapper(args):

             if tmp_dict:
                 measures_to_return[lesion_filename+'vol'] = \
-                    tmp_dict['total_volume']
+                    tmp_dict['lesion_total_volume']
                 measures_to_return[lesion_filename+'count'] = \
                     tmp_dict['lesion_count']
                 measures_to_return[lesion_filename+'sc'] = \
@@ -325,7 +325,7 @@ def main():
     if args.lesion_load is not None:
         in_name = args.lesion_load[0]
         lesion_img = nib.load(in_name)
-        lesion_data = get_data_as_mask(lesion_img)
+        lesion_data = get_data_as_mask(lesion_img, dtype=np.bool)
         lesion_atlas, _ = ndi.label(lesion_data)
         measures_to_compute.append(((in_name, np.unique(lesion_atlas)[1:]),
                                     nib.Nifti1Image(lesion_atlas,

From 53dc14b7869a921cc64e4b80e35d806efbe4aff2 Mon Sep 17 00:00:00 2001
From: frheault
Date: Mon, 12 Apr 2021 14:30:54 -0500
Subject: [PATCH 09/15] Fix max comments

---
 scripts/scil_analyse_lesions_load.py | 33 +++++++++++++++-------------
 scripts/scil_compute_connectivity.py |  6 ++---
 2 files changed, 21 insertions(+), 18 deletions(-)

diff --git a/scripts/scil_analyse_lesions_load.py b/scripts/scil_analyse_lesions_load.py
index 6cab4e1c3..5c84ac18c 100644
--- a/scripts/scil_analyse_lesions_load.py
+++ b/scripts/scil_analyse_lesions_load.py
@@ -5,6 +5,10 @@
 This script will output informations about lesion load in bundle(s).
 The input can either be streamlines, binary bundle map, or a bundle voxel
 label map.
+
+To be considered a valid lesion, the lesion volume must be at least
+min_lesion_vol mm3. This avoids the detection of thousands of single-voxel
+lesions when an automatic lesion segmentation tool is used.
 """

 import argparse
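Numerically, the new docstring paragraph is worth unpacking: at 1 mm isotropic
resolution the 7 mm3 default corresponds roughly to the smallest cross-shaped
lesion (one voxel plus its six face neighbours). A toy sketch of the
thresholding it describes, with invented arrays and 1 mm isotropic voxels
assumed:

    import numpy as np
    import scipy.ndimage as ndi

    mask = np.zeros((8, 8, 8), dtype=np.uint8)
    mask[1, 1, 1] = 1                # 1 mm3 speckle, should be rejected
    mask[4:6, 4:6, 4:6] = 1          # 8 mm3 lesion, should be kept

    atlas, nb_lesions = ndi.label(mask)
    voxel_vol = 1.0                  # mm3 per voxel
    min_lesion_vol = 7.0             # same default as --min_lesion_vol

    kept = [label for label in range(1, nb_lesions + 1)
            if np.count_nonzero(atlas == label) * voxel_vol >= min_lesion_vol]
    print(kept)                      # only the 8 mm3 lesion survives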
""" import argparse @@ -34,7 +38,7 @@ def _build_arg_parser(): formatter_class=argparse.RawTextHelpFormatter) p.add_argument('in_lesion', - help='Binary mask of the lesion (.nii.gz).') + help='Binary mask of the lesion(s) (.nii.gz).') p.add_argument('out_json', help='Output file (.json) for lesion information') p1 = p.add_mutually_exclusive_group() @@ -48,11 +52,11 @@ def _build_arg_parser(): p.add_argument('--min_lesion_vol', type=float, default=7, help='Minimum lesion volume in mm3 [%(default)s].') p.add_argument('--out_lesion_atlas', metavar='FILE', - help='Save the lesion as an atlas.') + help='Save the labelized lesion(s) map (.nii.gz).') p.add_argument('--out_lesion_stats', metavar='FILE', - help='Save the lesion-wise.') + help='Save the lesion-wise volume measure (.json).') p.add_argument('--out_streamlines_stats', metavar='FILE', - help='Save the lesion-wise streamlines count.') + help='Save the lesion-wise streamline count (.json).') add_json_args(p) add_overwrite_arg(p) @@ -112,13 +116,18 @@ def main(): nib.save(nib.Nifti1Image(lesion_atlas, lesion_img.affine), args.out_lesion_atlas) - if args.out_lesion_stats: + volume_dict = {bundle_name: lesion_load_dict} + with open(args.out_json, 'w') as outfile: + json.dump(volume_dict, outfile, + sort_keys=args.sort_keys, indent=args.indent) + + if args.out_streamlines_stats or args.out_lesion_stats: lesion_dict = {} for lesion in np.unique(lesion_atlas)[1:]: curr_vol = np.count_nonzero(lesion_atlas[lesion_atlas == lesion]) \ * np.prod(voxel_sizes) if curr_vol >= args.min_lesion_vol: - key = str(lesion).zfill(3) + key = str(lesion).zfill(4) lesion_dict[key] = {'volume': curr_vol} if args.bundle: tmp = np.zeros(lesion_atlas.shape) @@ -126,23 +135,17 @@ def main(): new_sft, _ = filter_grid_roi(sft, tmp, 'any', False) lesion_dict[key]['strs_count'] = len(new_sft) - volume_dict = {bundle_name: lesion_load_dict} - with open(args.out_json, 'w') as outfile: - json.dump(volume_dict, outfile, - sort_keys=args.sort_keys, indent=args.indent) - - if args.out_streamlines_stats or args.out_lesion_stats: - vol_dict = {bundle_name: {}} + lesion_vol_dict = {bundle_name: {}} streamlines_count_dict = {bundle_name: {'streamlines_count': {}}} for key in lesion_dict.keys(): - vol_dict[bundle_name][key] = lesion_dict[key]['volume'] + lesion_vol_dict[bundle_name][key] = lesion_dict[key]['volume'] if args.bundle: streamlines_count_dict[bundle_name]['streamlines_count'][key] = \ lesion_dict[key]['strs_count'] if args.out_lesion_stats: with open(args.out_lesion_stats, 'w') as outfile: - json.dump(vol_dict, outfile, + json.dump(lesion_vol_dict, outfile, sort_keys=args.sort_keys, indent=args.indent) if args.out_streamlines_stats: with open(args.out_streamlines_stats, 'w') as outfile: diff --git a/scripts/scil_compute_connectivity.py b/scripts/scil_compute_connectivity.py index afec45b55..ebabe35c9 100755 --- a/scripts/scil_compute_connectivity.py +++ b/scripts/scil_compute_connectivity.py @@ -29,10 +29,10 @@ as the input directory. Each will generate a matrix. The average non-zeros value in the map will be reported in the matrices nodes. -The parameters --lesion_load will compute 3 lesion related matrices: +The parameters --lesion_load will compute 3 lesion(s) related matrices: lesion_count.npy, lesion_vol.npy, lesion_sc.npy and put it inside of a specified folder. 
diff --git a/scripts/scil_compute_connectivity.py b/scripts/scil_compute_connectivity.py
index afec45b55..ebabe35c9 100755
--- a/scripts/scil_compute_connectivity.py
+++ b/scripts/scil_compute_connectivity.py
@@ -29,10 +29,10 @@ as the input directory. Each will generate a matrix. The average non-zeros
 value in the map will be reported in the matrices nodes.

-The parameters --lesion_load will compute 3 lesion related matrices:
+The parameter --lesion_load will compute 3 lesion(s)-related matrices:
 lesion_count.npy, lesion_vol.npy, lesion_sc.npy and put it inside of a
 specified folder. They represent the number of lesion, the total volume of
-lesion and the total of streamlines going through the lesion for of each
+lesion(s) and the total number of streamlines going through the lesion(s) for each
 connection. Each connection can be seen as a 'bundle' and then something
 similar to scil_analyse_lesion_load.py is run for each 'bundle'.
 """
@@ -252,7 +252,7 @@ def _build_arg_parser():
                         'weighted matrix.')
     p.add_argument('--lesion_load', nargs=2, metavar=('IN_FILE', 'OUT_DIR'),
                    help='Input binary mask (.nii.gz) and output directory '
-                        'for all lesion related matrices.')
+                        'for all lesion-related matrices.')

     p.add_argument('--density_weighting', action="store_true",
                    help='Use density-weighting for the metric weighted matrix.')

From 9e5c8e594f9fefd0da1783b3864aaf7d2bdc979a Mon Sep 17 00:00:00 2001
From: frheault
Date: Wed, 14 Apr 2021 11:53:38 -0500
Subject: [PATCH 10/15] Forgotten comments

---
 scripts/scil_analyse_lesions_load.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/scripts/scil_analyse_lesions_load.py b/scripts/scil_analyse_lesions_load.py
index 5c84ac18c..a237f5d8e 100644
--- a/scripts/scil_analyse_lesions_load.py
+++ b/scripts/scil_analyse_lesions_load.py
@@ -45,9 +45,9 @@ def _build_arg_parser():
     p1.add_argument('--bundle',
                     help='Path of the bundle file (.trk).')
     p1.add_argument('--bundle_mask',
-                    help='Path of the bundle binary mask.')
+                    help='Path of the bundle binary mask (.nii.gz).')
     p1.add_argument('--bundle_labels_map',
-                    help='Path of the bundle labels_map.')
+                    help='Path of the bundle labels map (.nii.gz).')

     p.add_argument('--min_lesion_vol', type=float, default=7,
                    help='Minimum lesion volume in mm3 [%(default)s].')

From b3a8e4842837a7b565bfeb5b348b102f1b8bb3fa Mon Sep 17 00:00:00 2001
From: frheault
Date: Wed, 14 Apr 2021 16:11:19 -0500
Subject: [PATCH 11/15] Add min_lesion_vol

---
 scripts/scil_compute_connectivity.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/scripts/scil_compute_connectivity.py b/scripts/scil_compute_connectivity.py
index ebabe35c9..cc69b31bf 100755
--- a/scripts/scil_compute_connectivity.py
+++ b/scripts/scil_compute_connectivity.py
@@ -86,6 +86,7 @@ def _processing_wrapper(args):
     similarity_directory = args[4][0]
     weighted = args[5]
     include_dps = args[6]
+    min_lesion_vol = args[7]

     hdf5_file = h5py.File(hdf5_filename, 'r')
     key = '{}_{}'.format(in_label, out_label)
@@ -186,6 +187,7 @@ def _processing_wrapper(args):
             tmp_dict = compute_lesion_stats(
                 density.astype(np.bool), lesion_atlas,
                 voxel_sizes=voxel_sizes, single_label=True,
+                min_lesion_vol=min_lesion_vol,
                 precomputed_lesion_labels=computed_lesion_labels)

             tmp_ind = _streamlines_in_mask(list(streamlines),
@@ -253,6 +255,8 @@ def _build_arg_parser():
     p.add_argument('--lesion_load', nargs=2, metavar=('IN_FILE', 'OUT_DIR'),
                    help='Input binary mask (.nii.gz) and output directory '
                         'for all lesion-related matrices.')
+    p.add_argument('--min_lesion_vol', type=float, default=7,
+                   help='Minimum lesion volume in mm3 [%(default)s].')

     p.add_argument('--density_weighting', action="store_true",
                    help='Use density-weighting for the metric weighted matrix.')
@@ -374,7 +378,8 @@ def main():
                                             measures_to_compute,
                                             args.similarity,
                                             args.density_weighting,
-                                            args.include_dps]))
+                                            args.include_dps,
+                                            args.min_lesion_vol]))
     else:
         pool = multiprocessing.Pool(nbr_cpu)
         measures_dict_list = pool.map(_processing_wrapper,
                                       itertools.repeat(args.similarity),
                                       itertools.repeat(
                                           args.density_weighting),
-                                      itertools.repeat(args.include_dps)))
+                                      itertools.repeat(args.include_dps),
+                                      itertools.repeat(args.min_lesion_vol)))
         pool.close()
         pool.join()

From 1941f66e340032419699f0fde687f47147448619 Mon Sep 17 00:00:00 2001
From: frheault
Date: Tue, 1 Jun 2021 17:53:56 -0500
Subject: [PATCH 12/15] Fixed tests

---
 scilpy/utils/metrics_tools.py                 | 2 +-
 scripts/tests/test_plot_mean_std_per_point.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/scilpy/utils/metrics_tools.py b/scilpy/utils/metrics_tools.py
index 358c9c238..9908e681a 100644
--- a/scilpy/utils/metrics_tools.py
+++ b/scilpy/utils/metrics_tools.py
@@ -328,7 +328,7 @@ def plot_metrics_stats(means, stds, title=None, xlabel=None,
     if figlabel is not None:
         fig.set_label(figlabel)

-    if means.shape[-1] > 1:
+    if means.ndim > 1:
         mean = np.average(means, axis=1)
         std = np.std(means, axis=1)
         alpha = 0.5
diff --git a/scripts/tests/test_plot_mean_std_per_point.py b/scripts/tests/test_plot_mean_std_per_point.py
index 0899fd0bc..455a93d7b 100644
--- a/scripts/tests/test_plot_mean_std_per_point.py
+++ b/scripts/tests/test_plot_mean_std_per_point.py
@@ -22,6 +22,6 @@ def test_execution_tractometry(script_runner):
     in_json = os.path.join(get_home(), 'tractometry',
                            'metric_label.json')
     ret = script_runner.run('scil_plot_mean_std_per_point.py', in_json,
-                            'out/')
+                            'out/', '--stats_over_population')

     assert ret.success

From 6e796546e9dcd9fad8d8124194b9af44db99b17a Mon Sep 17 00:00:00 2001
From: Francois Rheault
Date: Thu, 8 Jul 2021 10:39:25 -0500
Subject: [PATCH 13/15] Update scripts/scil_analyse_lesions_load.py

Co-authored-by: arnaudbore
---
 scripts/scil_analyse_lesions_load.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/scil_analyse_lesions_load.py b/scripts/scil_analyse_lesions_load.py
index a237f5d8e..1d01c97fc 100644
--- a/scripts/scil_analyse_lesions_load.py
+++ b/scripts/scil_analyse_lesions_load.py
@@ -40,7 +40,7 @@ def _build_arg_parser():
     p.add_argument('in_lesion',
                    help='Binary mask of the lesion(s) (.nii.gz).')
     p.add_argument('out_json',
-                   help='Output file (.json) for lesion information')
+                   help='Output file for lesion information (.json).')
     p1 = p.add_mutually_exclusive_group()
     p1.add_argument('--bundle',
                     help='Path of the bundle file (.trk).')

From 03b8f4eebd65fad55e1d4112396437a4ba87a7ad Mon Sep 17 00:00:00 2001
From: Francois Rheault
Date: Thu, 8 Jul 2021 10:39:31 -0500
Subject: [PATCH 14/15] Update scilpy/utils/metrics_tools.py

Co-authored-by: arnaudbore
---
 scilpy/utils/metrics_tools.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scilpy/utils/metrics_tools.py b/scilpy/utils/metrics_tools.py
index 9908e681a..4adc7e9fd 100644
--- a/scilpy/utils/metrics_tools.py
+++ b/scilpy/utils/metrics_tools.py
@@ -31,7 +31,7 @@ def compute_lesion_stats(map_data, lesion_atlas, single_label=True,
     min_lesion_vol : float
         Minimum lesion volume in mm3 (default: 7, cross-shape).
     precomputed_lesion_labels : np.ndarray (N)
-        For connectivity analysis, when the unique lesion labels are know,
+        For connectivity analysis, when the unique lesion labels are known,
         provided a pre-computed list of labels save computation.

     Returns
     ---------
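With the docstring corrections above, the pieces of the compute_lesion_stats
signature used throughout this series are all visible. A hedged usage sketch,
assembling those pieces on toy volumes (plain bool instead of the deprecated
np.bool; the expected keys are taken from the earlier hunks, not verified
against the final library):

    import numpy as np
    import scipy.ndimage as ndi
    from scilpy.utils.metrics_tools import compute_lesion_stats

    # Toy inputs: one 27-voxel lesion inside an all-ones 'bundle' mask.
    lesion_mask = np.zeros((16, 16, 16), dtype=np.uint8)
    lesion_mask[4:7, 4:7, 4:7] = 1
    bundle_mask = np.ones_like(lesion_mask).astype(bool)

    lesion_atlas, _ = ndi.label(lesion_mask)
    lesion_load_dict = compute_lesion_stats(
        bundle_mask, lesion_atlas,
        voxel_sizes=(1.0, 1.0, 1.0), single_label=True,
        min_lesion_vol=7,
        precomputed_lesion_labels=np.unique(lesion_atlas)[1:])

    # Expected keys, per the hunks above: 'lesion_total_volume', 'lesion_count'
    print(lesion_load_dict)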
From 4846a99a8cf3bafda60565aabe31bc2eb6b6996a Mon Sep 17 00:00:00 2001
From: frheault
Date: Thu, 8 Jul 2021 11:49:56 -0500
Subject: [PATCH 15/15] Arnaud comments

---
 scilpy/utils/metrics_tools.py | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/scilpy/utils/metrics_tools.py b/scilpy/utils/metrics_tools.py
index 4adc7e9fd..e28658731 100644
--- a/scilpy/utils/metrics_tools.py
+++ b/scilpy/utils/metrics_tools.py
@@ -20,7 +20,7 @@ def compute_lesion_stats(map_data, lesion_atlas, single_label=True,

     Parameters
     ------------
-    map_data : StatefulTractogram
+    map_data : np.ndarray
         Either a binary mask (uint8) or a voxel labels map (int16).
     lesion_atlas : np.ndarray (3)
         Labelled atlas of lesion. Should be int16.
@@ -353,10 +353,6 @@ def plot_metrics_stats(means, stds, title=None, xlabel=None,
         ax.plot(dim, mean, color="k", linewidth=5, solid_capstyle='round')

     # Plot the std
-    # print(mean)
-    # print(mean - std)
-    # print(mean + std)
-    # print()
     plt.fill_between(dim, mean - std, mean + std,
                      facecolor=fill_color, alpha=alpha)
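With the leftover debug prints removed, the plotting path ends as a plain
mean ± std band. For reference, the same visual effect in isolation, on
synthetic profile data (fill colour and alpha are arbitrary choices here):

    import numpy as np
    import matplotlib
    matplotlib.use('Agg')  # render off-screen, as a script would
    import matplotlib.pyplot as plt

    dim = np.arange(20)                 # points along the bundle
    mean = np.sin(dim / 3.0) + 2.0      # synthetic mean metric profile
    std = np.full_like(mean, 0.2)       # synthetic per-point deviation

    fig, ax = plt.subplots()
    ax.plot(dim, mean, color="k", linewidth=5, solid_capstyle='round')
    ax.fill_between(dim, mean - std, mean + std,
                    facecolor="#a0c8f0", alpha=0.5)
    fig.savefig('mean_std_band.png')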