diff --git a/NAMESPACE b/NAMESPACE
index 8cab948..7f72bd5 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -136,6 +136,7 @@ export(getMixtureDensityLossFunction)
 export(getMixtureDensityMseAccuracyFunction)
 export(getMixtureDensitySamplingFunction)
 export(getPretrainedNetwork)
+export(harvardOxfordAtlasLabeling)
 export(hippMapp3rSegmentation)
 export(histogramWarpImageIntensities)
 export(hyperMapp3rSegmentation)
diff --git a/R/brainExtraction.R b/R/brainExtraction.R
index dc293bd..d529af7 100644
--- a/R/brainExtraction.R
+++ b/R/brainExtraction.R
@@ -15,7 +15,7 @@
 #' \item{"t1combined": }{Brian's combination of "t1" and "t1nobrainer". One can also specify
 #' "t1combined[X]" where X is the morphological radius. X = 12 by default.}
 #' \item{"t1threetissue": }{T1-weighted MRI---originally developed from BrainWeb20 (and later expanded).
-#' Label 1: parenchyma, label 2: meninges/csf, label 3: misc. head.
+#' Label 1: parenchyma, label 2: meninges/csf, label 3: misc. head.}
 #' \item{"flair": }{FLAIR MRI.}
 #' \item{"t2": }{T2-w MRI.}
 #' \item{"bold": }{3-D mean BOLD MRI.}
diff --git a/R/desikanKillianyTourvilleLabeling.R b/R/desikanKillianyTourvilleLabeling.R
index 541c2fb..217058c 100644
--- a/R/desikanKillianyTourvilleLabeling.R
+++ b/R/desikanKillianyTourvilleLabeling.R
@@ -234,7 +234,7 @@ desikanKillianyTourvilleLabeling <- function( t1, doPreprocessing = TRUE,
   #
   ################################
 
-  t1Preprocessed <- t1
+  t1Preprocessed <- antsImageClone( t1 )
   if( doPreprocessing )
     {
     t1Preprocessing <- preprocessBrainImage( t1,
diff --git a/R/getPretrainedNetwork.R b/R/getPretrainedNetwork.R
index 359925f..b76dd98 100644
--- a/R/getPretrainedNetwork.R
+++ b/R/getPretrainedNetwork.R
@@ -82,6 +82,7 @@ getPretrainedNetwork <- function(
     "dktInner",
     "dktOuter",
     "dktOuterWithSpatialPriors",
+    "HarvardOxfordAtlasSubcortical",
     "ex5_coronal_weights",
     "ex5_sagittal_weights",
     "allen_brain_mask_weights",
@@ -154,7 +155,7 @@ getPretrainedNetwork <- function(
     "wholeHeadInpaintingFLAIR",
     "wholeHeadInpaintingPatchBasedT1",
     "wholeHeadInpaintingPatchBasedFLAIR",
-    "wholeTumorSegmentationT2Flair", 
+    "wholeTumorSegmentationT2Flair",
     "wholeLungMaskFromVentilation",
     "DeepAtroposHcpT1Weights",
     "DeepAtroposHcpT1T2Weights",
@@ -237,6 +238,7 @@ getPretrainedNetwork <- function(
     dktInner = "https://ndownloader.figshare.com/files/23266943",
     dktOuter = "https://ndownloader.figshare.com/files/23765132",
     dktOuterWithSpatialPriors = "https://ndownloader.figshare.com/files/24230768",
+    HarvardOxfordAtlasSubcortical = "https://figshare.com/ndownloader/files/50577546",
     e13x5_coronal_weights = "https://figshare.com/ndownloader/files/38035968",
     e13x5_sagittal_weights = "https://figshare.com/ndownloader/files/38035965",
     allen_brain_mask_weights = "https://figshare.com/ndownloader/files/36999880",
diff --git a/R/harvardOxfordAtlasLabeling.R b/R/harvardOxfordAtlasLabeling.R
new file mode 100644
index 0000000..bd72951
--- /dev/null
+++ b/R/harvardOxfordAtlasLabeling.R
@@ -0,0 +1,223 @@
+#' Subcortical and cerebellar labeling from a T1 image.
+#'
+#' Perform Harvard-Oxford Atlas (HOA) labeling using deep learning and data
+#' from "High Resolution, Comprehensive Atlases of the Human Brain
+#' Morphology" (NIH NIMH award number 5R01MH112748-04).  Repository:
+#' https://github.com/HOA-2/SubcorticalParcellations
+#'
+#' The labeling is as follows:
+#' \itemize{
+#' \item{Label 1:}{Lateral Ventricle Left}
+#' \item{Label 2:}{Lateral Ventricle Right}
+#' \item{Label 3:}{CSF}
+#' \item{Label 4:}{Third Ventricle}
+#' \item{Label 5:}{Fourth Ventricle}
+#' \item{Label 6:}{5th Ventricle}
+#' \item{Label 7:}{Nucleus Accumbens Left}
+#' \item{Label 8:}{Nucleus Accumbens Right}
+#' \item{Label 9:}{Caudate Left}
+#' \item{Label 10:}{Caudate Right}
+#' \item{Label 11:}{Putamen Left}
+#' \item{Label 12:}{Putamen Right}
+#' \item{Label 13:}{Globus Pallidus Left}
+#' \item{Label 14:}{Globus Pallidus Right}
+#' \item{Label 15:}{Brainstem}
+#' \item{Label 16:}{Thalamus Left}
+#' \item{Label 17:}{Thalamus Right}
+#' \item{Label 18:}{Inferior Horn of the Lateral Ventricle Left}
+#' \item{Label 19:}{Inferior Horn of the Lateral Ventricle Right}
+#' \item{Label 20:}{Hippocampal Formation Left}
+#' \item{Label 21:}{Hippocampal Formation Right}
+#' \item{Label 22:}{Amygdala Left}
+#' \item{Label 23:}{Amygdala Right}
+#' \item{Label 24:}{Optic Chiasm}
+#' \item{Label 25:}{VDC Anterior Left}
+#' \item{Label 26:}{VDC Anterior Right}
+#' \item{Label 27:}{VDC Posterior Left}
+#' \item{Label 28:}{VDC Posterior Right}
+#' \item{Label 29:}{Cerebellar Cortex Left}
+#' \item{Label 30:}{Cerebellar Cortex Right}
+#' \item{Label 31:}{Cerebellar White Matter Left}
+#' \item{Label 32:}{Cerebellar White Matter Right}
+#' }
+#'
+#' Preprocessing on the training data consisted of:
+#' * N4 bias correction,
+#' * brain extraction, and
+#' * affine registration to the HCP template.
+#'
+#' The input T1 should undergo the same steps.  If the input T1 is a raw
+#' image, these steps can be performed internally by setting
+#' \code{doPreprocessing = TRUE}.
+#'
+#' @param t1 raw or preprocessed 3-D T1-weighted brain image.
+#' @param doPreprocessing perform preprocessing.  See description above.
+#' @param verbose print progress.
+#' @return list consisting of the segmentation image and probability images for
+#' each label.
+#' @author Tustison NJ
+#' @examples
+#' \dontrun{
+#' library( ANTsRNet )
+#' library( keras )
+#'
+#' image <- antsImageRead( "t1.nii.gz" )
+#' results <- harvardOxfordAtlasLabeling( image )
+#' }
+#' @export
+harvardOxfordAtlasLabeling <- function( t1, doPreprocessing = TRUE,
+  verbose = FALSE )
+{
+
+  if( t1@dimension != 3 )
+    {
+    stop( "Image dimension must be 3." )
+    }
+
+  reshapeImage <- function( image, cropSize, interpType = "linear" )
+    {
+    # Resample to 1 mm^3 and pad/crop to the fixed network input size.
+    imageResampled <- NULL
+    if( interpType == "linear" )
+      {
+      imageResampled <- resampleImage( image, c( 1, 1, 1 ), useVoxels = FALSE, interpType = 0 )
+      } else {
+      imageResampled <- resampleImage( image, c( 1, 1, 1 ), useVoxels = FALSE, interpType = 1 )
+      }
+    imageCropped <- padOrCropImageToSize( imageResampled, cropSize )
+    return( imageCropped )
+    }
+
+  whichTemplate <- "hcpyaT1Template"
+  templateTransformType <- "antsRegistrationSyNQuick[a]"
+  template <- antsImageRead( getANTsXNetData( whichTemplate ) )
+
+  croppedTemplateSize <- c( 160, 176, 160 )
+
+  ################################
+  #
+  # Preprocess image
+  #
+  ################################
+
+  t1Preprocessed <- antsImageClone( t1 )
+  if( doPreprocessing )
+    {
+    t1Preprocessing <- preprocessBrainImage( t1,
+        truncateIntensity = NULL,
+        brainExtractionModality = "t1threetissue",
+        template = whichTemplate,
+        templateTransformType = templateTransformType,
+        doBiasCorrection = TRUE,
+        doDenoising = FALSE,
+        verbose = verbose )
+    t1Preprocessed <- t1Preprocessing$preprocessedImage * t1Preprocessing$brainMask
+    t1Preprocessed <- reshapeImage( t1Preprocessed, cropSize = croppedTemplateSize )
+    }
+
+  ################################
+  #
+  # Build model and load weights
+  #
+  ################################
+
+  labels <- 0:35
+  channelSize <- 1
+  numberOfClassificationLabels <- length( labels )
+
+  unetModelPre <- createUnetModel3D( c( croppedTemplateSize, channelSize ),
+    numberOfOutputs = numberOfClassificationLabels, mode = 'classification',
+    numberOfFilters = c( 16, 32, 64, 128 ), dropoutRate = 0.0,
+    convolutionKernelSize = c( 3, 3, 3 ), deconvolutionKernelSize = c( 2, 2, 2 ),
+    weightDecay = 0.0 )
+
+  # Attach a second, single-channel sigmoid output to the penultimate layer
+  # to match the architecture used during training.
+  penultimateLayer <- unetModelPre$layers[[length( unetModelPre$layers ) - 1]]$output
+
+  output2 <- penultimateLayer %>%
+    keras::layer_conv_3d( filters = 1,
+                          kernel_size = c( 1, 1, 1 ),
+                          activation = 'sigmoid',
+                          kernel_regularizer = keras::regularizer_l2( 0.0 ) )
+
+  unetModel <- keras::keras_model( inputs = unetModelPre$input,
+                                   outputs = list( unetModelPre$output, output2 ) )
+  weightsFileNamePath <- getPretrainedNetwork( "HarvardOxfordAtlasSubcortical" )
+  keras::load_model_weights_hdf5( unetModel, filepath = weightsFileNamePath )
+
+  ################################
+  #
+  # Do prediction and normalize to native space
+  #
+  ################################
+
+  if( verbose )
+    {
+    cat( "Model prediction using both the original and contralaterally flipped version\n" )
+    }
+
+  # Batch of two: the input and its left-right (first-axis) flip.
+  batchX <- array( data = 0, dim = c( 2, croppedTemplateSize, channelSize ) )
+  batchX[1,,,,1] <- as.array( iMath( t1Preprocessed, "Normalize" ) )
+  batchX[2,,,,1] <- batchX[1,croppedTemplateSize[1]:1,,,1]
+
+  predictedData <- unetModel %>% predict( batchX, verbose = verbose )
+
+  probabilityImages <- list()
+
+  # Midline labels and their left/right lateralized counterparts.
+  hoaLateralLabels <- c( 0, 3, 4, 5, 6, 15, 24 )
+  hoaLateralLeftLabels <- c( 1, 7, 9, 11, 13, 16, 18, 20, 22, 25, 27, 29, 31 )
+  hoaLateralRightLabels <- c( 2, 8, 10, 12, 14, 17, 19, 21, 23, 26, 28, 30, 32 )
+
+  hoaLabels <- list()
+  hoaLabels[[1]] <- hoaLateralLabels
+  hoaLabels[[2]] <- hoaLateralLeftLabels
+  hoaLabels[[3]] <- hoaLateralRightLabels
+
+  for( b in seq.int( 2 ) )
+    {
+    for( i in seq.int( length( hoaLabels ) ) )
+      {
+      for( j in seq.int( length( hoaLabels[[i]] ) ) )
+        {
+        label <- hoaLabels[[i]][j]
+        probabilityArray <- drop( predictedData[[1]][b,,,,label+1] )
+        if( label == 0 )
+          {
+          # Fold the remaining network outputs (labels 33--35) into the
+          # background probability.
+          probabilityArray <- probabilityArray + drop( rowSums(
+            predictedData[[1]][b,,,,34:36, drop=FALSE], dims = 4 ) )
+          }
+        if( b == 2 )
+          {
+          # Second batch entry is the contralateral flip:  flip the
+          # prediction back and swap the left/right label assignment.
+          probabilityArray <- probabilityArray[dim( probabilityArray )[1]:1,,]
+          if( i == 2 )
+            {
+            label <- hoaLateralRightLabels[j]
+            } else if( i == 3 ) {
+            label <- hoaLateralLeftLabels[j]
+            }
+          }
+        probabilityImage <- as.antsImage( probabilityArray, reference = t1Preprocessed )
+        if( doPreprocessing )
+          {
+          probabilityImage <- padOrCropImageToSize( probabilityImage, dim( template ) )
+          probabilityImage <- antsApplyTransforms( fixed = t1, moving = probabilityImage,
+            transformlist = t1Preprocessing$templateTransforms$invtransforms,
+            whichtoinvert = c( TRUE ), interpolator = "linear", verbose = verbose )
+          }
+        if( b == 1 )
+          {
+          probabilityImages[[label + 1]] <- probabilityImage
+          } else {
+          # Average the original and flipped predictions.
+          probabilityImages[[label + 1]] <- 0.5 * ( probabilityImages[[label + 1]] + probabilityImage )
+          }
+        }
+      }
+    }
+
+  # Voxelwise argmax over the 33 probability maps; subtract 1 to return to
+  # 0-based labels (0 = background).
+  imageMatrix <- imageListToMatrix( probabilityImages, t1 * 0 + 1 )
+  segmentationMatrix <- matrix( apply( imageMatrix, 2, which.max ), nrow = 1 )
+  segmentationImage <- matrixToImages( segmentationMatrix, t1 * 0 + 1 )[[1]] - 1
+
+  return( list(
+    segmentationImage = segmentationImage,
+    probabilityImages = probabilityImages
+    )
+  )
+}
diff --git a/man/brainExtraction.Rd b/man/brainExtraction.Rd
index 348eb3f..f132cf3 100644
--- a/man/brainExtraction.Rd
+++ b/man/brainExtraction.Rd
@@ -6,9 +6,9 @@
 \usage{
 brainExtraction(
   image,
-  modality = c("t1", "t1.v0", "t1.v1", "t1nobrainer", "t1combined", "t2", "t2.v0",
-    "t2star", "flair", "flair.v0", "bold", "bold.v0", "fa", "fa.v0", "mra", "t1t2infant",
-    "t1infant", "t2infant"),
+  modality = c("t1", "t1.v0", "t1.v1", "t1nobrainer", "t1combined", "t1threetissue",
+    "t2", "t2.v0", "t2star", "flair", "flair.v0", "bold", "bold.v0", "fa", "fa.v0",
+    "mra", "t1t2infant", "t1infant", "t2infant"),
   verbose = FALSE
 )
 }
@@ -21,6 +21,8 @@ brainExtraction(
 \item{"t1nobrainer": }{T1-weighted MRI---FreeSurfer-trained: h/t Satra Ghosh and Jakub Kaczmarzyk.}
 \item{"t1combined": }{Brian's combination of "t1" and "t1nobrainer". One can also specify
 "t1combined\link{X}" where X is the morphological radius. X = 12 by default.}
+\item{"t1threetissue": }{T1-weighted MRI---originally developed from BrainWeb20 (and later expanded).
+Label 1: parenchyma, label 2: meninges/csf, label 3: misc.
head.} \item{"flair": }{FLAIR MRI.} \item{"t2": }{T2-w MRI.} \item{"bold": }{3-D mean BOLD MRI.} diff --git a/man/createDenseNetModel2D.Rd b/man/createDenseNetModel2D.Rd index 928bb98..b3349ce 100644 --- a/man/createDenseNetModel2D.Rd +++ b/man/createDenseNetModel2D.Rd @@ -12,7 +12,7 @@ createDenseNetModel2D( numberOfDenseBlocks = 1, growthRate = 12, dropoutRate = 0.2, - weightDecay = 1e-04, + weightDecay = 0.0001, mode = "classification" ) } diff --git a/man/createDenseNetModel3D.Rd b/man/createDenseNetModel3D.Rd index 1722887..5f5c346 100644 --- a/man/createDenseNetModel3D.Rd +++ b/man/createDenseNetModel3D.Rd @@ -12,7 +12,7 @@ createDenseNetModel3D( numberOfDenseBlocks = 1, growthRate = 12, dropoutRate = 0.2, - weightDecay = 1e-04, + weightDecay = 0.0001, mode = "classification" ) } diff --git a/man/createDenseUnetModel2D.Rd b/man/createDenseUnetModel2D.Rd index d41d5e4..83edbf9 100644 --- a/man/createDenseUnetModel2D.Rd +++ b/man/createDenseUnetModel2D.Rd @@ -13,7 +13,7 @@ createDenseUnetModel2D( reductionRate = 0, depth = 7, dropoutRate = 0, - weightDecay = 1e-04, + weightDecay = 0.0001, mode = c("classification", "regression") ) } diff --git a/man/createDenseUnetModel3D.Rd b/man/createDenseUnetModel3D.Rd index d422de5..6fa3426 100644 --- a/man/createDenseUnetModel3D.Rd +++ b/man/createDenseUnetModel3D.Rd @@ -13,7 +13,7 @@ createDenseUnetModel3D( reductionRate = 0, depth = 7, dropoutRate = 0, - weightDecay = 1e-04, + weightDecay = 0.0001, mode = c("classification", "regression") ) } diff --git a/man/createResUnetModel2D.Rd b/man/createResUnetModel2D.Rd index 4282c4f..445b427 100644 --- a/man/createResUnetModel2D.Rd +++ b/man/createResUnetModel2D.Rd @@ -12,7 +12,7 @@ createResUnetModel2D( convolutionKernelSize = c(3, 3), deconvolutionKernelSize = c(2, 2), dropoutRate = 0, - weightDecay = 1e-04, + weightDecay = 0.0001, mode = c("classification", "regression") ) } diff --git a/man/createResUnetModel3D.Rd b/man/createResUnetModel3D.Rd index 4b7b0f2..15f4721 100644 --- a/man/createResUnetModel3D.Rd +++ b/man/createResUnetModel3D.Rd @@ -12,7 +12,7 @@ createResUnetModel3D( convolutionKernelSize = c(3, 3, 3), deconvolutionKernelSize = c(2, 2, 2), dropoutRate = 0, - weightDecay = 1e-04, + weightDecay = 0.0001, mode = c("classification", "regression") ) } diff --git a/man/createRmnetGenerator.Rd b/man/createRmnetGenerator.Rd index e2239db..3c681d0 100644 --- a/man/createRmnetGenerator.Rd +++ b/man/createRmnetGenerator.Rd @@ -4,7 +4,7 @@ \alias{createRmnetGenerator} \title{Implementation of the "RMNet" generator architecture for inpainting} \usage{ -createRmnetGenerator() +createRmnetGenerator(numberOfChannels = 3) } \value{ a keras model diff --git a/man/createShivaUnetModel3D.Rd b/man/createShivaUnetModel3D.Rd index b8b2a4d..31033f4 100644 --- a/man/createShivaUnetModel3D.Rd +++ b/man/createShivaUnetModel3D.Rd @@ -5,11 +5,14 @@ \title{Implementation of the "shiva" u-net architecture for PVS and WMH segmentation} \usage{ -createShivaUnetModel3D(numberOfModalities = 1) +createShivaUnetModel3D(numberOfModalities = 1, numberOfOutputs = 1) } \arguments{ \item{numberOfModalities}{Specifies number of channels in the architecture.} + +\item{numberOfOutputs}{Specifies the number of outputs per voxel. 
+Determines final activation function (1 = sigmoid, >1 = softmax).} } \value{ a u-net keras model diff --git a/man/createSsdModel2D.Rd b/man/createSsdModel2D.Rd index 570cc7e..399550d 100644 --- a/man/createSsdModel2D.Rd +++ b/man/createSsdModel2D.Rd @@ -7,7 +7,7 @@ createSsdModel2D( inputImageSize, numberOfOutputs, - l2Regularization = 5e-04, + l2Regularization = 0.0005, minScale = 0.1, maxScale = 0.9, aspectRatiosPerLayer = list(c("1:1", "2:1", "1:2"), c("1:1", "2:1", "1:2", "3:1", diff --git a/man/createSsdModel3D.Rd b/man/createSsdModel3D.Rd index 92de21e..969b93e 100644 --- a/man/createSsdModel3D.Rd +++ b/man/createSsdModel3D.Rd @@ -7,7 +7,7 @@ createSsdModel3D( inputImageSize, numberOfOutputs, - l2Regularization = 5e-04, + l2Regularization = 0.0005, minScale = 0.1, maxScale = 0.9, aspectRatiosPerLayer = list(c("1:1:1", "2:1:1", "1:2:1", "1:1:2"), c("1:1:1", "2:1:1", diff --git a/man/createWideResNetModel2D.Rd b/man/createWideResNetModel2D.Rd index 1e7509e..2a1d2e1 100644 --- a/man/createWideResNetModel2D.Rd +++ b/man/createWideResNetModel2D.Rd @@ -12,7 +12,7 @@ createWideResNetModel2D( residualBlockSchedule = c(16, 32, 64), poolSize = c(8, 8), dropoutRate = 0, - weightDecay = 5e-04, + weightDecay = 0.0005, mode = "classification" ) } diff --git a/man/createWideResNetModel3D.Rd b/man/createWideResNetModel3D.Rd index 5f0fa9e..07cf6ac 100644 --- a/man/createWideResNetModel3D.Rd +++ b/man/createWideResNetModel3D.Rd @@ -12,7 +12,7 @@ createWideResNetModel3D( residualBlockSchedule = c(16, 32, 64), poolSize = c(8, 8, 8), dropoutRate = 0, - weightDecay = 5e-04, + weightDecay = 0.0005, mode = c("classification", "regression") ) } diff --git a/man/getANTsXNetData.Rd b/man/getANTsXNetData.Rd index 007134d..c7cf7f9 100644 --- a/man/getANTsXNetData.Rd +++ b/man/getANTsXNetData.Rd @@ -23,7 +23,9 @@ getANTsXNetData( "DevCCF_P04_STPT_50um_BrainParcellationJayMask", "hcpyaT1Template", "hcpyaT2Template", "hcpyaFATemplate", "hcpyaTemplateBrainMask", - "hcpyaTemplateBrainSegmentation"), + "hcpyaTemplateBrainSegmentation", "hcpinterT1Template", "hcpinterT2Template", + "hcpinterFATemplate", "hcpinterTemplateBrainMask", + "hcpinterTemplateBrainSegmentation"), targetFileName ) } diff --git a/man/getPretrainedNetwork.Rd b/man/getPretrainedNetwork.Rd index b0b3d66..3c96381 100644 --- a/man/getPretrainedNetwork.Rd +++ b/man/getPretrainedNetwork.Rd @@ -14,53 +14,54 @@ getPretrainedNetwork( "brainExtractionRobustFA", "brainExtractionNoBrainer", "brainExtractionInfantT1T2", "brainExtractionInfantT1", "brainExtractionInfantT2", - "brainSegmentation", "brainSegmentationPatchBased", "bratsStage1", "bratsStage2", - "cerebellumWhole", "cerebellumTissue", "cerebellumLabels", "claustrum_axial_0", - "claustrum_axial_1", "claustrum_axial_2", "claustrum_coronal_0", - "claustrum_coronal_1", "claustrum_coronal_2", "ctHumanLung", "dbpn4x", "deepFlash", - "deepFlashLeftT1", "deepFlashRightT1", "deepFlashLeftBoth", "deepFlashRightBoth", - "deepFlashLeftT1Hierarchical", - "deepFlashRightT1Hierarchical", - "deepFlashLeftBothHierarchical", "deepFlashRightBothHierarchical", - "deepFlashLeftT1Hierarchical_ri", "deepFlashRightT1Hierarchical_ri", - "deepFlashLeftBothHierarchical_ri", "deepFlashRightBothHierarchical_ri", - "deepFlash2LeftT1Hierarchical", "deepFlash2RightT1Hierarchical", "deepFlashLeft8", - "deepFlashRight8", "deepFlashLeft16", "deepFlashRight16", "deepFlashLeft16new", - "deepFlashRight16new", "denoising", "dktInner", "dktOuter", - "dktOuterWithSpatialPriors", "ex5_coronal_weights", - "ex5_sagittal_weights", + 
"brainExtractionBrainWeb20", "brainSegmentation", "brainSegmentationPatchBased", + "bratsStage1", "bratsStage2", "cerebellumWhole", "cerebellumTissue", + "cerebellumLabels", "claustrum_axial_0", "claustrum_axial_1", "claustrum_axial_2", + "claustrum_coronal_0", "claustrum_coronal_1", "claustrum_coronal_2", "ctHumanLung", + "dbpn4x", "deepFlash", "deepFlashLeftT1", "deepFlashRightT1", "deepFlashLeftBoth", + "deepFlashRightBoth", + "deepFlashLeftT1Hierarchical", + "deepFlashRightT1Hierarchical", "deepFlashLeftBothHierarchical", + "deepFlashRightBothHierarchical", "deepFlashLeftT1Hierarchical_ri", + "deepFlashRightT1Hierarchical_ri", "deepFlashLeftBothHierarchical_ri", + "deepFlashRightBothHierarchical_ri", "deepFlash2LeftT1Hierarchical", + "deepFlash2RightT1Hierarchical", "deepFlashLeft8", "deepFlashRight8", + "deepFlashLeft16", "deepFlashRight16", "deepFlashLeft16new", "deepFlashRight16new", + "denoising", "dktInner", "dktOuter", "dktOuterWithSpatialPriors", + + "HarvardOxfordAtlasSubcortical", "ex5_coronal_weights", "ex5_sagittal_weights", "allen_brain_mask_weights", "functionalLungMri", "hippMapp3rInitial", "hippMapp3rRefine", "hyperMapp3r", "hypothalamus", "inpainting_sagittal_rmnet_weights", "inpainting_coronal_rmnet_weights", "inpainting_axial_rmnet_weights", "inpainting_axial_rmnet_flair_weights", "inpainting_coronal_rmnet_flair_weights", "inpainting_sagittal_rmnet_flair_weights", - "koniqMBCS", "koniqMS", "koniqMS2", "koniqMS3", "lesion_whole_brain", - "lungCtWithPriorsSegmentationWeights", "maskLobes", - "mriSuperResolution", + "koniqMBCS", "koniqMS", "koniqMS2", "koniqMS3", "lesion_whole_brain", + + "lungCtWithPriorsSegmentationWeights", "maskLobes", "mriSuperResolution", "mriModalityClassification", "mraVesselWeights_160", "mouseMriBrainExtraction", "mouseT2wBrainExtraction3D", "mouseT2wBrainParcellation3DNick", "mouseT2wBrainParcellation3DTct", "mouseSTPTBrainParcellation3DJay", "pvs_shiva_t1_0", "pvs_shiva_t1_1", "pvs_shiva_t1_2", "pvs_shiva_t1_3", "pvs_shiva_t1_4", "pvs_shiva_t1_5", "pvs_shiva_t1_flair_0", "pvs_shiva_t1_flair_1", - "pvs_shiva_t1_flair_2", "pvs_shiva_t1_flair_3", "pvs_shiva_t1_flair_4", - "wmh_shiva_flair_0", "wmh_shiva_flair_1", - "wmh_shiva_flair_2", - "wmh_shiva_flair_3", "wmh_shiva_flair_4", "wmh_shiva_t1_flair_0", - "wmh_shiva_t1_flair_1", "wmh_shiva_t1_flair_2", "wmh_shiva_t1_flair_3", - "wmh_shiva_t1_flair_4", "protonLungMri", "protonLobes", "pulmonaryArteryWeights", - "pulmonaryAirwayWeights", "sixTissueOctantBrainSegmentation", - "sixTissueOctantBrainSegmentationWithPriors1", + "pvs_shiva_t1_flair_2", "pvs_shiva_t1_flair_3", "pvs_shiva_t1_flair_4", + + "wmh_shiva_flair_0", "wmh_shiva_flair_1", "wmh_shiva_flair_2", "wmh_shiva_flair_3", + "wmh_shiva_flair_4", "wmh_shiva_t1_flair_0", "wmh_shiva_t1_flair_1", + "wmh_shiva_t1_flair_2", "wmh_shiva_t1_flair_3", "wmh_shiva_t1_flair_4", + "protonLungMri", "protonLobes", "pulmonaryArteryWeights", "pulmonaryAirwayWeights", + "sixTissueOctantBrainSegmentation", "sixTissueOctantBrainSegmentationWithPriors1", "sixTissueOctantBrainSegmentationWithPriors2", "sysuMediaWmhFlairOnlyModel0", - "sysuMediaWmhFlairOnlyModel1", "sysuMediaWmhFlairOnlyModel2", - "sysuMediaWmhFlairT1Model0", - "sysuMediaWmhFlairT1Model1", + "sysuMediaWmhFlairOnlyModel1", + "sysuMediaWmhFlairOnlyModel2", + "sysuMediaWmhFlairT1Model0", "sysuMediaWmhFlairT1Model1", "sysuMediaWmhFlairT1Model2", "tidsQualityAssessment", "xrayLungOrientation", "xrayLungExtraction", "chexnetClassificationModel", "chexnetClassificationANTsXNetModel", "tb_antsxnet_model", 
"wholeHeadInpaintingFLAIR", "wholeHeadInpaintingPatchBasedT1", "wholeHeadInpaintingPatchBasedFLAIR", "wholeTumorSegmentationT2Flair", "wholeLungMaskFromVentilation", "DeepAtroposHcpT1Weights", - "DeepAtroposHcpT1T2Weights", "DeepAtroposHcpT1FAWeights", + "DeepAtroposHcpT1T2Weights", + "DeepAtroposHcpT1FAWeights", "DeepAtroposHcpT1T2FAWeights"), targetFileName ) diff --git a/man/harvardOxfordAtlasLabeling.Rd b/man/harvardOxfordAtlasLabeling.Rd new file mode 100644 index 0000000..6ddd3a6 --- /dev/null +++ b/man/harvardOxfordAtlasLabeling.Rd @@ -0,0 +1,83 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/harvardOxfordAtlasLabeling.R +\name{harvardOxfordAtlasLabeling} +\alias{harvardOxfordAtlasLabeling} +\title{Subcortical and cerebellar labeling from a T1 image.} +\usage{ +harvardOxfordAtlasLabeling(t1, doPreprocessing = TRUE, verbose = FALSE) +} +\arguments{ +\item{t1}{raw or preprocessed 3-D T1-weighted brain image.} + +\item{doPreprocessing}{perform preprocessing. See description above.} + +\item{verbose}{print progress.} +} +\value{ +list consisting of the segmentation image and probability images for +each label. +} +\description{ +Perform HOA labeling using deep learning and data from +Morphology" number: "NIH NIMH 5R01MH112748-04". Repository: +https://github.com/HOA-2/SubcorticalParcellations' +} +\details{ +The labeling is as follows: +\itemize{ +\item{Label 1:}{Lateral Ventricle Left} +\item{Label 2:}{Lateral Ventricle Right} +\item{Label 3:}{CSF} +\item{Label 4:}{Third Ventricle} +\item{Label 5:}{Fourth Ventricle} +\item{Label 6:}{5th Ventricle} +\item{Label 7:}{Nucleus Accumbens Left} +\item{Label 8:}{Nucleus Accumbens Right} +\item{Label 9:}{Caudate Left} +\item{Label 10:}{Caudate Right} +\item{Label 11:}{Putamen Left} +\item{Label 12:}{Putamen Right} +\item{Label 13:}{Globus Pallidus Left} +\item{Label 14:}{Globus Pallidus Right} +\item{Label 15:}{Brainstem} +\item{Label 16:}{Thalamus Left} +\item{Label 17:}{Thalamus Right} +\item{Label 18:}{Inferior Horn of the Lateral Ventricle Left} +\item{Label 19:}{Inferior Horn of the Lateral Ventricle Right} +\item{Label 20:}{Hippocampal Formation Left} +\item{Label 21:}{Hippocampal Formation Right} +\item{Label 22:}{Amygdala Left} +\item{Label 23:}{Amygdala Right} +\item{Label 24:}{Optic Chiasm} +\item{Label 25:}{VDC Anterior Left} +\item{Label 26:}{VDC Anterior Right} +\item{Label 27:}{VDC Posterior Left} +\item{Label 28:}{VDC Posterior Right} +\item{Label 29:}{Cerebellar Cortex Left} +\item{Label 30:}{Cerebellar Cortex Right} +\item{Label 31:}{Cerebellar White Matter Left} +\item{Label 32:}{Cerebellar White Matter Right} +} + +Preprocessing on the training data consisted of: +\itemize{ +\item n4 bias correction, +\item brain extraction, and +\item affine registration to HCP. +The input T1 should undergo the same steps. If the input T1 is the raw +T1, these steps can be performed by the internal preprocessing, i.e. 
set +\code{doPreprocessing = TRUE} +} +} +\examples{ +\dontrun{ +library( ANTsRNet ) +library( keras ) + +image <- antsImageRead( "t1.nii.gz" ) +results <- harvardOxfordAtlasLabeling( image ) +} +} +\author{ +Tustison NJ +} diff --git a/man/neuralStyleTransfer.Rd b/man/neuralStyleTransfer.Rd index e9079a8..b2b3858 100644 --- a/man/neuralStyleTransfer.Rd +++ b/man/neuralStyleTransfer.Rd @@ -10,7 +10,7 @@ neuralStyleTransfer( initialCombinationImage = NULL, numberOfIterations = 10, learningRate = 1, - totalVariationWeight = 8.5e-05, + totalVariationWeight = 0.000085, contentWeight = 0.025, styleImageWeights = 1, contentLayerNames = c("block5_conv2"), diff --git a/man/preprocessBrainImage.Rd b/man/preprocessBrainImage.Rd index 3e9b657..b2ee63f 100644 --- a/man/preprocessBrainImage.Rd +++ b/man/preprocessBrainImage.Rd @@ -27,7 +27,7 @@ for intensity truncation.} \item{brainExtractionModality}{string or NULL. Perform brain extraction using antsxnet tools. One of "t1", "t1v0", "t1nobrainer", "t1combined", -"flair", "t2", "bold", "fa", "t1infant", "t2infant", or NULL.} +"flair", "t2", "bold", "fa", "t1infant", "t2infant", "t1threetissue", or NULL.} \item{templateTransformType}{see Details in help for \code{antsRegistration}. Typically "Rigid" or "Affine".}
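
Usage note: when harvardOxfordAtlasLabeling() is called with doPreprocessing = FALSE, the input must already match the training preprocessing documented above. The following is a minimal sketch of reproducing those steps manually; it simply mirrors the preprocessBrainImage() call and the reshaping inside the function, and assumes ANTsR/ANTsRNet are attached ("t1.nii.gz" is an illustrative file name). Note that with doPreprocessing = FALSE the results remain in the cropped template space rather than being transformed back to the native T1.

# Reproduce the internal preprocessing, then skip it in the labeling call.
library( ANTsR )
library( ANTsRNet )

t1 <- antsImageRead( "t1.nii.gz" )
prep <- preprocessBrainImage( t1,
    truncateIntensity = NULL,
    brainExtractionModality = "t1threetissue",
    template = "hcpyaT1Template",
    templateTransformType = "antsRegistrationSyNQuick[a]",
    doBiasCorrection = TRUE,
    doDenoising = FALSE )
t1Brain <- prep$preprocessedImage * prep$brainMask

# Resample to 1 mm^3 and pad/crop to the fixed network input size,
# exactly as reshapeImage() does inside the function.
t1Brain <- resampleImage( t1Brain, c( 1, 1, 1 ), useVoxels = FALSE, interpType = 0 )
t1Brain <- padOrCropImageToSize( t1Brain, c( 160, 176, 160 ) )

hoa <- harvardOxfordAtlasLabeling( t1Brain, doPreprocessing = FALSE )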
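
The prediction loop in R/harvardOxfordAtlasLabeling.R averages the network output over the original image and its contralateral flip; for lateralized structures, the flipped pass must write its re-flipped probability map into the opposite hemisphere's label. A small self-contained illustration of that bookkeeping (swapLabel() is a hypothetical helper, not part of this patch):

hoaLateralLeftLabels  <- c( 1, 7, 9, 11, 13, 16, 18, 20, 22, 25, 27, 29, 31 )
hoaLateralRightLabels <- c( 2, 8, 10, 12, 14, 17, 19, 21, 23, 26, 28, 30, 32 )

swapLabel <- function( label )
{
  # Left labels map to their right counterparts and vice versa;
  # midline labels (e.g., brainstem = 15) are their own mirror.
  if( label %in% hoaLateralLeftLabels )
    {
    return( hoaLateralRightLabels[match( label, hoaLateralLeftLabels )] )
    } else if( label %in% hoaLateralRightLabels ) {
    return( hoaLateralLeftLabels[match( label, hoaLateralRightLabels )] )
    }
  return( label )
}

swapLabel( 16 )  # Thalamus Left -> 17 (Thalamus Right)
swapLabel( 15 )  # Brainstem     -> 15 (unchanged)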
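
A common downstream use of the returned segmentation is a per-structure volume table. A minimal sketch, assuming hoa is the result of a harvardOxfordAtlasLabeling() call as above and using ANTsR's antsGetSpacing() and as.array() accessors:

# Per-label volume in mm^3: voxel count times voxel volume.
voxelVolume <- prod( antsGetSpacing( hoa$segmentationImage ) )
labelCounts <- table( as.array( hoa$segmentationImage ) )
volumes <- voxelVolume * labelCounts
print( volumes )  # label 0 = background; labels 1-32 follow the table above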