diff --git a/NAMESPACE b/NAMESPACE index b65e947..74c7185 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -101,6 +101,7 @@ export(createResNetWithSpatialTransformerNetworkModel3D) export(createResUnetModel2D) export(createResUnetModel3D) export(createRmnetGenerator) +export(createShivaUnetModel3D) export(createSimpleClassificationWithSpatialTransformerNetworkModel2D) export(createSimpleClassificationWithSpatialTransformerNetworkModel3D) export(createSimpleFullyConvolutionalNeuralNetworkModel3D) @@ -196,6 +197,7 @@ export(regressionMatchImage) export(sampleFromCategoricalDistribution) export(sampleFromOutput) export(shivaPvsSegmentation) +export(shivaWmhSegmentation) export(simulateBiasField) export(splitMixtureParameters) export(sysuMediaWmhSegmentation) diff --git a/R/createCustomUnetModel.R b/R/createCustomUnetModel.R index cea4667..150bdd1 100644 --- a/R/createCustomUnetModel.R +++ b/R/createCustomUnetModel.R @@ -297,6 +297,140 @@ createHippMapp3rUnetModel3D <- function( inputImageSize, return( unetModel ) } +#' Implementation of the "shiva" u-net architecture for PVS and WMH +#' segmentation +#' +#' Publications: +#' +#' * PVS: https://pubmed.ncbi.nlm.nih.gov/34262443/ +#' * WMH: https://pubmed.ncbi.nlm.nih.gov/38050769/ +#' +#' with respective GitHub repositories: +#' +#' * PVS: https://github.com/pboutinaud/SHIVA_PVS +#' * WMH: https://github.com/pboutinaud/SHIVA_WMH +#' +#' @param numberOfModalities Specifies number of channels in the +#' architecture. 
+#' @return a u-net keras model +#' @author Tustison NJ +#' @examples +#' \dontrun{ +#' +#' model <- createShivaUnetModel3D() +#' +#' } +#' @import keras +#' @export +createShivaUnetModel3D <- function( numberOfModalities = 1 ) +{ + K <- tensorflow::tf$keras$backend + + getPadShape <- function( targetLayer, referenceLayer ) + { + padShape <- list() + + delta <- K$int_shape( targetLayer )[[2]] - K$int_shape( referenceLayer )[[2]] + if( delta %% 2 != 0 ) + { + padShape[[1]] <- c( as.integer( delta / 2 ), as.integer( delta / 2 ) + 1L ) + } else { + padShape[[1]] <- c( as.integer( delta / 2 ), as.integer( delta / 2 ) ) + } + + delta <- K$int_shape( targetLayer )[[3]] - K$int_shape( referenceLayer )[[3]] + if( delta %% 2 != 0 ) + { + padShape[[2]] <- c( as.integer( delta / 2 ), as.integer( delta / 2 ) + 1L ) + } else { + padShape[[2]] <- c( as.integer( delta / 2 ), as.integer( delta / 2 ) ) + } + + delta <- K$int_shape( targetLayer )[[4]] - K$int_shape( referenceLayer )[[4]] + if( delta %% 2 != 0 ) + { + padShape[[3]] <- c( as.integer( delta / 2 ), as.integer( delta / 2 ) + 1L ) + } else { + padShape[[3]] <- c( as.integer( delta / 2 ), as.integer( delta / 2 ) ) + } + if( all( padShape[[1]] == c( 0, 0 ) ) && all( padShape[[2]] == c( 0, 0 ) ) && all( padShape[[3]] == c( 0, 0 ) ) ) + { + return( NULL ) + } else { + return( padShape ) + } + } + inputImageSize <- c( 160, 214, 176, numberOfModalities ) + numberOfFilters <- c( 10, 18, 32, 58, 104, 187, 337 ) + + inputs <- layer_input( shape = inputImageSize ) + + # encoding layers + + encodingLayers <- list() + + outputs <- inputs + for( i in seq.int( length( numberOfFilters ) ) ) + { + outputs <- outputs %>% layer_conv_3d( numberOfFilters[i], kernel_size = 3L, padding = 'same', use_bias = FALSE ) + outputs <- outputs %>% layer_batch_normalization() + outputs <- outputs %>% layer_activation( "swish" ) + + outputs <- outputs %>% layer_conv_3d( numberOfFilters[i], kernel_size = 3L, padding = 'same', use_bias = FALSE ) + outputs 
<- outputs %>% layer_batch_normalization() + outputs <- outputs %>% layer_activation( "swish" ) + + encodingLayers[[i]] <- outputs + outputs <- outputs %>% layer_max_pooling_3d( pool_size = 2L ) + dropoutRate <- 0.05 + if( i > 1 ) + { + dropoutRate <- 0.5 + } + outputs <- outputs %>% layer_spatial_dropout_3d( rate = dropoutRate ) + } + + # decoding layers + + for( i in seq.int( from = length( encodingLayers ), to = 1, by = -1 ) ) + { + upsampleLayer <- outputs %>% layer_upsampling_3d( size = 2L ) + padShape <- getPadShape( encodingLayers[[i]], upsampleLayer ) + if( i > 1 && ! is.null( padShape ) ) + { + zeroLayer <- upsampleLayer %>% layer_zero_padding_3d( padding = padShape ) + outputs <- layer_concatenate( list( zeroLayer, encodingLayers[[i]] ), axis = -1L, trainable = TRUE ) + } else { + outputs <- layer_concatenate( list( upsampleLayer, encodingLayers[[i]] ), axis = -1L, trainable = TRUE ) + } + + outputs <- outputs %>% layer_conv_3d( K$int_shape( outputs )[[5]], kernel_size = 3L, padding = 'same', use_bias = FALSE ) + outputs <- outputs %>% layer_batch_normalization() + outputs <- outputs %>% layer_activation( "swish" ) + + outputs <- outputs %>% layer_conv_3d( numberOfFilters[i], kernel_size = 3L, padding = 'same', use_bias = FALSE ) + outputs <- outputs %>% layer_batch_normalization() + outputs <- outputs %>% layer_activation( "swish" ) + outputs <- outputs %>% layer_spatial_dropout_3d( rate = 0.5 ) + } + + # final + + outputs <- outputs %>% layer_conv_3d( 10, kernel_size = 3L, padding = 'same', use_bias = FALSE ) + outputs <- outputs %>% layer_batch_normalization() + outputs <- outputs %>% layer_activation( "swish" ) + + outputs <- outputs %>% layer_conv_3d( 10, kernel_size = 3L, padding = 'same', use_bias = FALSE ) + outputs <- outputs %>% layer_batch_normalization() + outputs <- outputs %>% layer_activation( "swish" ) + + outputs <- outputs %>% layer_conv_3d( 1, kernel_size = 1L, activation = "sigmoid", padding = 'same' ) + + unetModel <- keras_model( 
inputs = inputs, outputs = outputs ) + + return( unetModel ) +} + #' Implementation of the "HyperMapp3r" U-net architecture #' #' Creates a keras model implementation of the u-net architecture @@ -480,7 +614,7 @@ createSysuMediaUnetModel2D <- function( inputImageSize, anatomy = c( "wmh", "cla { getCropShape <- function( targetLayer, referenceLayer ) { - K <- keras::backend() + K <- tensorflow::tf$keras$backend cropShape <- list() @@ -592,7 +726,7 @@ createSysuMediaUnetModel3D <- function( inputImageSize, { getCropShape <- function( targetLayer, referenceLayer ) { - K <- keras::backend() + K <- tensorflow::tf$keras$backend cropShape <- list() diff --git a/R/getPretrainedNetwork.R b/R/getPretrainedNetwork.R index ca2c9c0..78b340e 100644 --- a/R/getPretrainedNetwork.R +++ b/R/getPretrainedNetwork.R @@ -123,6 +123,16 @@ getPretrainedNetwork <- function( "pvs_shiva_t1_flair_2", "pvs_shiva_t1_flair_3", "pvs_shiva_t1_flair_4", + "wmh_shiva_flair_0", + "wmh_shiva_flair_1", + "wmh_shiva_flair_2", + "wmh_shiva_flair_3", + "wmh_shiva_flair_4", + "wmh_shiva_t1_flair_0", + "wmh_shiva_t1_flair_1", + "wmh_shiva_t1_flair_2", + "wmh_shiva_t1_flair_3", + "wmh_shiva_t1_flair_4", "protonLungMri", "protonLobes", "pulmonaryArteryWeights", @@ -145,7 +155,7 @@ getPretrainedNetwork <- function( "wholeHeadInpaintingFLAIR", "wholeHeadInpaintingPatchBasedT1", "wholeHeadInpaintingPatchBasedFLAIR", - "wholeTumorSegmentationT2Flair", + "wholeTumorSegmentationT2Flair", "wholeLungMaskFromVentilation" ), targetFileName, antsxnetCacheDirectory = NULL ) { @@ -250,17 +260,27 @@ getPretrainedNetwork <- function( mouseT2wBrainParcellation3DNick = "https://figshare.com/ndownloader/files/44714944", mouseT2wBrainParcellation3DTct = "https://figshare.com/ndownloader/files/47214538", mouseSTPTBrainParcellation3DJay = "https://figshare.com/ndownloader/files/46710592", - pvs_shiva_t1_0 = "https://figshare.com/ndownloader/files/48363799", - pvs_shiva_t1_1 = "https://figshare.com/ndownloader/files/48363832", - 
pvs_shiva_t1_2 = "https://figshare.com/ndownloader/files/48363814", - pvs_shiva_t1_3 = "https://figshare.com/ndownloader/files/48363790", - pvs_shiva_t1_4 = "https://figshare.com/ndownloader/files/48363829", - pvs_shiva_t1_5 = "https://figshare.com/ndownloader/files/48363823", - pvs_shiva_t1_flair_0 = "https://figshare.com/ndownloader/files/48363784", - pvs_shiva_t1_flair_1 = "https://figshare.com/ndownloader/files/48363820", - pvs_shiva_t1_flair_2 = "https://figshare.com/ndownloader/files/48363796", - pvs_shiva_t1_flair_3 = "https://figshare.com/ndownloader/files/48363793", - pvs_shiva_t1_flair_4 = "https://figshare.com/ndownloader/files/48363826", + pvs_shiva_t1_0 = "https://figshare.com/ndownloader/files/48660169", + pvs_shiva_t1_1 = "https://figshare.com/ndownloader/files/48660193", + pvs_shiva_t1_2 = "https://figshare.com/ndownloader/files/48660199", + pvs_shiva_t1_3 = "https://figshare.com/ndownloader/files/48660178", + pvs_shiva_t1_4 = "https://figshare.com/ndownloader/files/48660172", + pvs_shiva_t1_5 = "https://figshare.com/ndownloader/files/48660187", + pvs_shiva_t1_flair_0 = "https://figshare.com/ndownloader/files/48660181", + pvs_shiva_t1_flair_1 = "https://figshare.com/ndownloader/files/48660175", + pvs_shiva_t1_flair_2 = "https://figshare.com/ndownloader/files/48660184", + pvs_shiva_t1_flair_3 = "https://figshare.com/ndownloader/files/48660190", + pvs_shiva_t1_flair_4 = "https://figshare.com/ndownloader/files/48660196", + wmh_shiva_flair_0 = "https://figshare.com/ndownloader/files/48660487", + wmh_shiva_flair_1 = "https://figshare.com/ndownloader/files/48660496", + wmh_shiva_flair_2 = "https://figshare.com/ndownloader/files/48660493", + wmh_shiva_flair_3 = "https://figshare.com/ndownloader/files/48660490", + wmh_shiva_flair_4 = "https://figshare.com/ndownloader/files/48660511", + wmh_shiva_t1_flair_0 = "https://figshare.com/ndownloader/files/48660529", + wmh_shiva_t1_flair_1 = "https://figshare.com/ndownloader/files/48660547", + wmh_shiva_t1_flair_2 = 
"https://figshare.com/ndownloader/files/48660499", + wmh_shiva_t1_flair_3 = "https://figshare.com/ndownloader/files/48660550", + wmh_shiva_t1_flair_4 = "https://figshare.com/ndownloader/files/48660544", protonLungMri = "https://ndownloader.figshare.com/files/13606799", protonLobes = "https://figshare.com/ndownloader/files/30678455", pulmonaryAirwayWeights = "https://figshare.com/ndownloader/files/45187168", diff --git a/R/whiteMatterHyperintensitySegmentation.R b/R/whiteMatterHyperintensitySegmentation.R index 4b903fc..fec9b62 100644 --- a/R/whiteMatterHyperintensitySegmentation.R +++ b/R/whiteMatterHyperintensitySegmentation.R @@ -606,7 +606,7 @@ wmhSegmentation <- function( flair, t1, whiteMatterMask = NULL, domainImageIsMask = TRUE ) } -#' PVS/VRS segmentation. +#' SHIVA PVS/VRS segmentation. #' #' Perform segmentation of perivascular (PVS) or Vircho-Robin spaces (VRS). #' \url{https://pubmed.ncbi.nlm.nih.gov/34262443/} @@ -733,13 +733,14 @@ shivaPvsSegmentation <- function( t1, flair = NULL, for( i in seq.int( length( modelIds ) ) ) { - modelFile <- getPretrainedNetwork( paste0( "pvs_shiva_t1_", modelIds[i] ), - antsxnetCacheDirectory = antsxnetCacheDirectory ) + modelWeightsFile <- getPretrainedNetwork( paste0( "pvs_shiva_t1_", modelIds[i] ), + antsxnetCacheDirectory = antsxnetCacheDirectory ) if( verbose ) { - cat( "Loading", modelFile, "\n" ) + cat( "Loading", modelWeightsFile, "\n" ) } - model <- tensorflow::tf$keras$models$load_model( modelFile, compile = FALSE ) + model <- createShivaUnetModel3D( numberOfModalities = 1 ) + model$load_weights( modelWeightsFile ) if( i == 1 ) { batchY <- model$predict( batchX, verbose = verbose ) @@ -761,13 +762,14 @@ shivaPvsSegmentation <- function( t1, flair = NULL, for( i in seq.int( length( modelIds ) ) ) { - modelFile <- getPretrainedNetwork( paste0( "pvs_shiva_t1_flair_", modelIds[i] ), - antsxnetCacheDirectory = antsxnetCacheDirectory ) + modelWeightsFile <- getPretrainedNetwork( paste0( "pvs_shiva_t1_flair_", 
modelIds[i] ), + antsxnetCacheDirectory = antsxnetCacheDirectory ) if( verbose ) { - cat( "Loading", modelFile, "\n" ) + cat( "Loading", modelWeightsFile, "\n" ) } - model <- tensorflow::tf$keras$models$load_model( modelFile, compile = FALSE ) + model <- createShivaUnetModel3D( numberOfModalities = 2 ) + model$load_weights( modelWeightsFile ) if( i == 1 ) { batchY <- model$predict( batchX, verbose = verbose ) @@ -783,4 +785,185 @@ direction = antsGetDirection( reorientTemplate ) ) pvs <- applyAntsrTransformToImage( invertAntsrTransform( xfrm ), pvs, t1 ) return( pvs ) +} + +#' SHIVA WMH segmentation. +#' +#' Perform segmentation of white matter hyperintensities. +#' \url{https://pubmed.ncbi.nlm.nih.gov/38050769/} +#' with the original implementation available here: +#' https://github.com/pboutinaud/SHIVA_WMH +#' +#' @param flair input 3-D FLAIR brain image. +#' @param t1 (Optional) input 3-D T1-weighted brain image (aligned to FLAIR image). +#' @param whichModel integer or string. Several models were trained for the +#' case of FLAIR-only or T1/FLAIR image pairs. One can use a specific single +#' trained model or the average of the entire ensemble. I.e., options are: +#' * For FLAIR-only: 0, 1, 2, 3, 4. +#' * For T1/FLAIR: 0, 1, 2, 3, 4. +#' * Or "all" for using the entire ensemble. +#' @param doPreprocessing perform n4 bias correction, intensity truncation, brain +#' extraction. +#' @param antsxnetCacheDirectory destination directory for storing the downloaded +#' template and model weights. Since these can be reused, if +#' \code{is.null(antsxnetCacheDirectory)}, these data will be downloaded to the +#' inst/extdata/ subfolder of the ANTsRNet package. +#' @param verbose print progress. +#' @return probabilistic image. 
+#' @author Tustison NJ +#' @examples +#' \dontrun{ +#' library( ANTsRNet ) +#' library( keras ) +#' +#' t1 <- antsImageRead( "t1.nii.gz" ) +#' flair <- antsImageRead( "flair.nii.gz" ) +#' results <- shivaWmhSegmentation( flair, t1 ) +#' } +#' @export +shivaWmhSegmentation <- function( flair, t1 = NULL, + whichModel = "all", doPreprocessing = TRUE, + antsxnetCacheDirectory = NULL, verbose = FALSE ) +{ + ################################ + # + # Preprocess images + # + ################################ + + t1Preprocessed <- NULL + flairPreprocessed <- NULL + brainMask <- NULL + + if( doPreprocessing ) + { + if( verbose ) + { + message( "Preprocess image(s).\n" ) + } + flairPreprocessing <- preprocessBrainImage( flair, + truncateIntensity = c( 0.0, 0.99 ), + brainExtractionModality = "flair", + doBiasCorrection = TRUE, + doDenoising = FALSE, + intensityNormalizationType = "01", + antsxnetCacheDirectory = antsxnetCacheDirectory, + verbose = verbose ) + brainMask <- thresholdImage( flairPreprocessing$brainMask, 0.5, 1, 1, 0 ) + flairPreprocessed <- flairPreprocessing$preprocessedImage * brainMask + + if( ! is.null( t1 ) ) + { + t1Preprocessing <- preprocessBrainImage( t1, + truncateIntensity = c( 0.0, 0.99 ), + brainExtractionModality = NULL, + doBiasCorrection = TRUE, + doDenoising = FALSE, + intensityNormalizationType = "01", + antsxnetCacheDirectory = antsxnetCacheDirectory, + verbose = verbose ) + t1Preprocessed <- t1Preprocessing$preprocessedImage * brainMask + } + } else { + flairPreprocessed <- antsImageClone( flair ) + if( ! 
is.null( t1 ) ) + { + t1Preprocessed <- antsImageClone( t1 ) + } + brainMask <- thresholdImage( flair, 0, 0, 0, 1 ) + } + + imageShape <- c( 160, 214, 176 ) + onesArray <- array( data = 1, dim = imageShape ) + reorientTemplate <- as.antsImage( onesArray, origin = c( 0, 0, 0 ), + spacing = c( 1, 1, 1 ), + direction = diag( 3 ) ) + + centerOfMassTemplate <- getCenterOfMass( reorientTemplate ) + centerOfMassImage <- getCenterOfMass( brainMask ) + xfrm <- createAntsrTransform( type = "Euler3DTransform", + center = round( centerOfMassTemplate ), + translation = round( centerOfMassImage - centerOfMassTemplate ) ) + + flairPreprocessed <- applyAntsrTransformToImage( xfrm, flairPreprocessed, + reorientTemplate ) + if( ! is.null( t1 ) ) + { + t1Preprocessed <- applyAntsrTransformToImage( xfrm, t1Preprocessed, + reorientTemplate ) + } + + ################################ + # + # Load models and predict + # + ################################ + + batchY <- NULL + + if( is.null( t1 ) ) + { + batchX <- array( data = 0, dim = c( 1, imageShape, 1 ) ) + batchX[1,,,,1] <- as.array( flairPreprocessed ) + + modelIds <- c( whichModel ) + if( whichModel == "all" ) + { + modelIds <- c( 0, 1, 2, 3, 4 ) + } + + for( i in seq.int( length( modelIds ) ) ) + { + modelWeightsFile <- getPretrainedNetwork( paste0( "wmh_shiva_flair_", modelIds[i] ), + antsxnetCacheDirectory = antsxnetCacheDirectory ) + if( verbose ) + { + cat( "Loading", modelWeightsFile, "\n" ) + } + model <- createShivaUnetModel3D( numberOfModalities = 1 ) + model$load_weights( modelWeightsFile ) + if( i == 1 ) + { + batchY <- model$predict( batchX, verbose = verbose ) + } else { + batchY <- batchY + model$predict( batchX, verbose = verbose ) + } + } + batchY <- batchY / length( modelIds ) + } else { + batchX <- array( data = 0, dim = c( 1, imageShape, 2 ) ) + batchX[1,,,,1] <- as.array( t1Preprocessed ) + batchX[1,,,,2] <- as.array( flairPreprocessed ) + + modelIds <- c( whichModel ) + if( whichModel == "all" ) + { + modelIds 
<- c( 0, 1, 2, 3, 4 ) + } + + for( i in seq.int( length( modelIds ) ) ) + { + modelWeightsFile <- getPretrainedNetwork( paste0( "wmh_shiva_t1_flair_", modelIds[i] ), + antsxnetCacheDirectory = antsxnetCacheDirectory ) + if( verbose ) + { + cat( "Loading", modelWeightsFile, "\n" ) + } + model <- createShivaUnetModel3D( numberOfModalities = 2 ) + model$load_weights( modelWeightsFile ) + if( i == 1 ) + { + batchY <- model$predict( batchX, verbose = verbose ) + } else { + batchY <- batchY + model$predict( batchX, verbose = verbose ) + } + } + batchY <- batchY / length( modelIds ) + } + + wmh <- as.antsImage( drop( batchY ), origin = antsGetOrigin( reorientTemplate ), + spacing = antsGetSpacing( reorientTemplate ), + direction = antsGetDirection( reorientTemplate ) ) + wmh <- applyAntsrTransformToImage( invertAntsrTransform( xfrm ), wmh, flair ) + return( wmh ) } \ No newline at end of file diff --git a/man/createShivaUnetModel3D.Rd b/man/createShivaUnetModel3D.Rd new file mode 100644 index 0000000..b8b2a4d --- /dev/null +++ b/man/createShivaUnetModel3D.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/createCustomUnetModel.R +\name{createShivaUnetModel3D} +\alias{createShivaUnetModel3D} +\title{Implementation of the "shiva" u-net architecture for PVS and WMH +segmentation} +\usage{ +createShivaUnetModel3D(numberOfModalities = 1) +} +\arguments{ +\item{numberOfModalities}{Specifies number of channels in the +architecture.} +} +\value{ +a u-net keras model +} +\description{ +Publications: + +\if{html}{\out{
}}\preformatted{* PVS: https://pubmed.ncbi.nlm.nih.gov/34262443/ +* WMH: https://pubmed.ncbi.nlm.nih.gov/38050769/ +}\if{html}{\out{
}} +} +\details{ +with respective GitHub repositories: + +\if{html}{\out{
}}\preformatted{* PVS: https://github.com/pboutinaud/SHIVA_PVS +* WMH: https://github.com/pboutinaud/SHIVA_WMH +}\if{html}{\out{
}} +} +\examples{ +\dontrun{ + +model <- createShivaUnetModel3D() + +} +} +\author{ +Tustison NJ +} diff --git a/man/getPretrainedNetwork.Rd b/man/getPretrainedNetwork.Rd index c6dcca3..fc6a457 100644 --- a/man/getPretrainedNetwork.Rd +++ b/man/getPretrainedNetwork.Rd @@ -39,21 +39,23 @@ getPretrainedNetwork( "mraVesselWeights_160", "mouseMriBrainExtraction", "mouseT2wBrainExtraction3D", "mouseT2wBrainParcellation3DNick", "mouseT2wBrainParcellation3DTct", "mouseSTPTBrainParcellation3DJay", - "pvs_shiva_t1_0.h5", "pvs_shiva_t1_1.h5", "pvs_shiva_t1_2.h5", "pvs_shiva_t1_3.h5", - "pvs_shiva_t1_4.h5", "pvs_shiva_t1_5.h5", "pvs_shiva_t1_flair_0.h5", - "pvs_shiva_t1_flair_1.h5", "pvs_shiva_t1_flair_2.h5", "pvs_shiva_t1_flair_3.h5", - "pvs_shiva_t1_flair_4.h5", "protonLungMri", "protonLobes", "pulmonaryArteryWeights", - - "pulmonaryAirwayWeights", "sixTissueOctantBrainSegmentation", - "sixTissueOctantBrainSegmentationWithPriors1", + "pvs_shiva_t1_0", "pvs_shiva_t1_1", "pvs_shiva_t1_2", "pvs_shiva_t1_3", + "pvs_shiva_t1_4", "pvs_shiva_t1_5", "pvs_shiva_t1_flair_0", "pvs_shiva_t1_flair_1", + "pvs_shiva_t1_flair_2", "pvs_shiva_t1_flair_3", "pvs_shiva_t1_flair_4", + "wmh_shiva_flair_0", "wmh_shiva_flair_1", "wmh_shiva_flair_2", "wmh_shiva_flair_3", + "wmh_shiva_flair_4", + "wmh_shiva_t1_flair_0", "wmh_shiva_t1_flair_1", + "wmh_shiva_t1_flair_2", "wmh_shiva_t1_flair_3", "wmh_shiva_t1_flair_4", + "protonLungMri", "protonLobes", "pulmonaryArteryWeights", "pulmonaryAirwayWeights", + "sixTissueOctantBrainSegmentation", "sixTissueOctantBrainSegmentationWithPriors1", "sixTissueOctantBrainSegmentationWithPriors2", "sysuMediaWmhFlairOnlyModel0", "sysuMediaWmhFlairOnlyModel1", "sysuMediaWmhFlairOnlyModel2", "sysuMediaWmhFlairT1Model0", "sysuMediaWmhFlairT1Model1", - "sysuMediaWmhFlairT1Model2", "tidsQualityAssessment", "xrayLungOrientation", + "sysuMediaWmhFlairT1Model2", + "tidsQualityAssessment", "xrayLungOrientation", "xrayLungExtraction", "chexnetClassificationModel", 
"chexnetClassificationANTsXNetModel", "tb_antsxnet_model", - "wholeHeadInpaintingFLAIR", - "wholeHeadInpaintingPatchBasedT1", + "wholeHeadInpaintingFLAIR", "wholeHeadInpaintingPatchBasedT1", "wholeHeadInpaintingPatchBasedFLAIR", "wholeTumorSegmentationT2Flair", "wholeLungMaskFromVentilation"), targetFileName, diff --git a/man/shivaPvsSegmentation.Rd b/man/shivaPvsSegmentation.Rd index 016eb0d..1573cd2 100644 --- a/man/shivaPvsSegmentation.Rd +++ b/man/shivaPvsSegmentation.Rd @@ -2,12 +2,13 @@ % Please edit documentation in R/whiteMatterHyperintensitySegmentation.R \name{shivaPvsSegmentation} \alias{shivaPvsSegmentation} -\title{PVS/VRS segmentation.} +\title{SHIVA PVS/VRS segmentation.} \usage{ shivaPvsSegmentation( t1, flair = NULL, whichModel = "all", + doPreprocessing = TRUE, antsxnetCacheDirectory = NULL, verbose = FALSE ) @@ -24,15 +25,15 @@ trained model or the average of the entire ensemble. I.e., options are: * For T1/FLAIR: 0, 1, 2, 3, 4. * Or "all" for using the entire ensemble.} +\item{doPreprocessing}{perform n4 bias correction, intensity truncation, brain +extraction.} + \item{antsxnetCacheDirectory}{destination directory for storing the downloaded template and model weights. Since these can be resused, if \code{is.null(antsxnetCacheDirectory)}, these data will be downloaded to the inst/extdata/ subfolder of the ANTsRNet package.} \item{verbose}{print progress.} - -\item{doPreprocessing}{perform n4 bias correction, intensity truncation, brain -extraction.} } \value{ probabilistic image. 
diff --git a/man/shivaWmhSegmentation.Rd b/man/shivaWmhSegmentation.Rd new file mode 100644 index 0000000..9be3c0f --- /dev/null +++ b/man/shivaWmhSegmentation.Rd @@ -0,0 +1,59 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/whiteMatterHyperintensitySegmentation.R +\name{shivaWmhSegmentation} +\alias{shivaWmhSegmentation} +\title{SHIVA WMH segmentation.} +\usage{ +shivaWmhSegmentation( + flair, + t1 = NULL, + whichModel = "all", + doPreprocessing = TRUE, + antsxnetCacheDirectory = NULL, + verbose = FALSE +) +} +\arguments{ +\item{flair}{input 3-D FLAIR brain image.} + +\item{t1}{(Optional) input 3-D T1-weighted brain image (aligned to FLAIR image).} + +\item{whichModel}{integer or string. Several models were trained for the +case of FLAIR-only or T1/FLAIR image pairs. One can use a specific single +trained model or the average of the entire ensemble. I.e., options are: +* For FLAIR-only: 0, 1, 2, 3, 4. +* For T1/FLAIR: 0, 1, 2, 3, 4. +* Or "all" for using the entire ensemble.} + +\item{doPreprocessing}{perform n4 bias correction, intensity truncation, brain +extraction.} + +\item{antsxnetCacheDirectory}{destination directory for storing the downloaded +template and model weights. Since these can be reused, if +\code{is.null(antsxnetCacheDirectory)}, these data will be downloaded to the +inst/extdata/ subfolder of the ANTsRNet package.} + +\item{verbose}{print progress.} +} +\value{ +probabilistic image. +} +\description{ +Perform segmentation of white matter hyperintensities. +\url{https://pubmed.ncbi.nlm.nih.gov/38050769/} +with the original implementation available here: +https://github.com/pboutinaud/SHIVA_WMH +} +\examples{ +\dontrun{ +library( ANTsRNet ) +library( keras ) + +t1 <- antsImageRead( "t1.nii.gz" ) +flair <- antsImageRead( "flair.nii.gz" ) +results <- shivaWmhSegmentation( flair, t1 ) +} +} +\author{ +Tustison NJ +}