diff --git a/.Rbuildignore b/.Rbuildignore index 7129571..6dc0156 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -7,3 +7,4 @@ ^_pkgdown\.yml$ ^docs$ ^pkgdown$ +^vignettes/articles$ diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml index 1490c6a..a2c680b 100644 --- a/.github/workflows/docker-publish.yml +++ b/.github/workflows/docker-publish.yml @@ -10,6 +10,8 @@ on: branches: [ "main", "devel" ] # Publish semver tags as releases. tags: [ 'v*.*.*' ] + pull_request: + branches: [main, devel] workflow_dispatch: env: diff --git a/.gitignore b/.gitignore index b27cb91..20e257d 100644 --- a/.gitignore +++ b/.gitignore @@ -49,3 +49,4 @@ po/*~ rsconnect/ .Rproj.user docs +inst/doc diff --git a/.lintr b/.lintr new file mode 100644 index 0000000..3377b64 --- /dev/null +++ b/.lintr @@ -0,0 +1,4 @@ +linters: linters_with_defaults( + line_length_linter = line_length_linter(120), + object_usage_linter = NULL + ) diff --git a/DESCRIPTION b/DESCRIPTION index 78894ef..fcab531 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -38,16 +38,21 @@ Suggests: callr, httr2, RPostgres, + pool, testthat (>= 3.0.0), usethis, withr, DBI, glue, jsonlite, - purrr + purrr, + knitr, + rmarkdown, + sentryR RdMacros: mathjaxr Config/testthat/edition: 3 Encoding: UTF-8 Roxygen: list(markdown = TRUE) RoxygenNote: 7.3.1 URL: https://ttscience.github.io/unbiased/ +VignetteBuilder: knitr diff --git a/Dockerfile b/Dockerfile index 1407623..1c8d604 100644 --- a/Dockerfile +++ b/Dockerfile @@ -22,12 +22,14 @@ ENV RENV_CONFIG_SANDBOX_ENABLED=FALSE COPY ./renv ./renv COPY .Rprofile . + +# Both renv.lock and DESCRIPTION are needed to restore the R environment COPY renv.lock . +COPY DESCRIPTION . RUN R -e 'renv::restore()' COPY .Rbuildignore . -COPY DESCRIPTION . COPY NAMESPACE . COPY inst/ ./inst COPY R/ ./R diff --git a/R/api_create_study.R b/R/api_create_study.R index a6a0157..0ee2513 100644 --- a/R/api_create_study.R +++ b/R/api_create_study.R @@ -1,139 +1,112 @@ -api__minimization_pocock <- function( # nolint: cyclocomp_linter. +api__minimization_pocock <- function( + # nolint: cyclocomp_linter. 
identifier, name, method, arms, covariates, p, req, res) { - validation_errors <- vector() + collection <- checkmate::makeAssertCollection() - err <- checkmate::check_character(name, min.chars = 1, max.chars = 255) - if (err != TRUE) { - validation_errors <- unbiased:::append_error( - validation_errors, "name", err - ) - } + checkmate::assert( + checkmate::check_character(name, min.chars = 1, max.chars = 255), + .var.name = "name", + add = collection + ) - err <- checkmate::check_character(identifier, min.chars = 1, max.chars = 12) - if (err != TRUE) { - validation_errors <- unbiased:::append_error( - validation_errors, - "identifier", - err - ) - } + checkmate::assert( + checkmate::check_character(identifier, min.chars = 1, max.chars = 12), + .var.name = "identifier", + add = collection + ) - err <- checkmate::check_choice(method, choices = c("range", "var", "sd")) - if (err != TRUE) { - validation_errors <- unbiased:::append_error( - validation_errors, - "method", - err - ) - } + checkmate::assert( + checkmate::check_choice(method, choices = c("range", "var", "sd")), + .var.name = "method", + add = collection + ) - err <- + checkmate::assert( checkmate::check_list( arms, types = "integerish", any.missing = FALSE, min.len = 2, names = "unique" - ) - if (err != TRUE) { - validation_errors <- unbiased:::append_error( - validation_errors, - "arms", - err - ) - } + ), + .var.name = "arms", + add = collection + ) - err <- + checkmate::assert( checkmate::check_list( covariates, types = c("numeric", "list", "character"), any.missing = FALSE, - min.len = 2, + min.len = 1, names = "unique" - ) - if (err != TRUE) { - validation_errors <- - unbiased:::append_error(validation_errors, "covariates", err) - } + ), + .var.name = "covariates3", + add = collection + ) response <- list() for (c_name in names(covariates)) { c_content <- covariates[[c_name]] - err <- checkmate::check_list( - c_content, - any.missing = FALSE, - len = 2, + checkmate::assert( + checkmate::check_list( + c_content, + any.missing = FALSE, + len = 2, + ), + .var.name = "covariates1", + add = collection ) - if (err != TRUE) { - validation_errors <- - unbiased:::append_error( - validation_errors, - glue::glue("covariates[{c_name}]"), - err - ) - } - err <- checkmate::check_names( - names(c_content), - permutation.of = c("weight", "levels"), + + checkmate::assert( + checkmate::check_names( + names(c_content), + permutation.of = c("weight", "levels"), + ), + .var.name = "covariates2", + add = collection ) - if (err != TRUE) { - validation_errors <- - unbiased:::append_error( - validation_errors, - glue::glue("covariates[{c_name}]"), - err - ) - } # check covariate weight - err <- checkmate::check_numeric(c_content$weight, - lower = 0, - finite = TRUE, - len = 1, - null.ok = FALSE + checkmate::assert( + checkmate::check_numeric(c_content$weight, + lower = 0, + finite = TRUE, + len = 1, + null.ok = FALSE + ), + .var.name = "weight", + add = collection ) - if (err != TRUE) { - validation_errors <- - unbiased:::append_error( - validation_errors, - glue::glue("covariates[{c_name}][weight]"), - err - ) - } - - err <- checkmate::check_character(c_content$levels, - min.chars = 1, - min.len = 2, - unique = TRUE + + checkmate::assert( + checkmate::check_character(c_content$levels, + min.chars = 1, + min.len = 2, + unique = TRUE + ), + .var.name = "levels", + add = collection ) - if (err != TRUE) { - validation_errors <- - unbiased:::append_error( - validation_errors, - glue::glue("covariates[{c_name}][levels]"), - err - ) - } } # check 
probability - p <- as.numeric(p) - err <- checkmate::check_numeric(p, lower = 0, upper = 1, len = 1) - if (err != TRUE) { - validation_errors <- - unbiased:::append_error( - validation_errors, - "p", - err - ) - } + checkmate::assert( + checkmate::check_numeric(p, + lower = 0, upper = 1, len = 1, + any.missing = FALSE, null.ok = FALSE + ), + .var.name = "p", + add = collection + ) + - if (length(validation_errors) > 0) { + if (length(collection$getMessages()) > 0) { res$status <- 400 return(list( - error = "Input validation failed", - validation_errors = validation_errors + error = "There was a problem with the input data to create the study", + validation_errors = collection$getMessages() )) } @@ -167,7 +140,7 @@ api__minimization_pocock <- function( # nolint: cyclocomp_linter. if (!is.null(r$error)) { res$status <- 503 return(list( - error = "There was a problem creating the study", + error = "There was a problem saving created study to the database", details = r$error )) } diff --git a/R/api_randomize.R b/R/api_randomize.R index 9cb6e31..855b3e6 100644 --- a/R/api_randomize.R +++ b/R/api_randomize.R @@ -1,3 +1,54 @@ +parse_pocock_parameters <- + function(db_connetion_pool, study_id, current_state) { + parameters <- + dplyr::tbl(db_connetion_pool, "study") |> + dplyr::filter(id == study_id) |> + dplyr::select(parameters) |> + dplyr::pull() + + parameters <- jsonlite::fromJSON(parameters) + + if (!checkmate::test_list(parameters, null.ok = FALSE)) { + message <- checkmate::test_list(parameters, null.ok = FALSE) + res$status <- 400 + res$body <- + list( + error = glue::glue( + "Parse validation failed. 'Parameters' must be a list: {message}" + ) + ) + + return(res) + } + + ratio_arms <- + dplyr::tbl(db_connetion_pool, "arm") |> + dplyr::filter(study_id == !!study_id) |> + dplyr::select(name, ratio) |> + dplyr::collect() + + params <- list( + arms = ratio_arms$name, + current_state = tibble::as_tibble(current_state), + ratio = setNames(ratio_arms$ratio, ratio_arms$name), + method = parameters$method, + p = parameters$p, + weights = parameters$weights |> unlist() + ) + + if (!checkmate::test_list(params, null.ok = FALSE)) { + message <- checkmate::test_list(params, null.ok = FALSE) + res$status <- 400 + res$body <- + list(error = glue::glue( + "Parse validation failed. 
Input parameters must be a list: {message}" + )) + return(res) + } + + return(params) + } + api__randomize_patient <- function(study_id, current_state, req, res) { collection <- checkmate::makeAssertCollection() @@ -8,30 +59,30 @@ api__randomize_patient <- function(study_id, current_state, req, res) { checkmate::check_subset( x = req$args$study_id, choices = dplyr::tbl(db_connection_pool, "study") |> - dplyr::select("id") |> + dplyr::select(id) |> dplyr::pull() ), - .var.name = "Study ID", + .var.name = "study_id", add = collection ) # Retrieve study details, especially the ones about randomization method_randomization <- dplyr::tbl(db_connection_pool, "study") |> - dplyr::filter(.data$id == study_id) |> + dplyr::filter(id == study_id) |> dplyr::select("method") |> dplyr::pull() checkmate::assert( checkmate::check_scalar(method_randomization, null.ok = FALSE), - .var.name = "Randomization method", + .var.name = "method_randomization", add = collection ) if (length(collection$getMessages()) > 0) { res$status <- 400 return(list( - error = "Study input validation failed", + error = "There was a problem with the randomization preparation", validation_errors = collection$getMessages() )) } @@ -39,93 +90,38 @@ api__randomize_patient <- function(study_id, current_state, req, res) { # Dispatch based on randomization method to parse parameters params <- switch(method_randomization, - minimisation_pocock = tryCatch( - { - do.call( - parse_pocock_parameters, - list(db_connection_pool, study_id, current_state) - ) - }, - error = function(e) { - res$status <- 400 - res$body <- glue::glue("Error message: {conditionMessage(e)}") - logger::log_error("Error: {err}", err = e) - } + minimisation_pocock = do.call( + parse_pocock_parameters, list(db_connection_pool, study_id, current_state) ) ) arm_name <- switch(method_randomization, - minimisation_pocock = tryCatch( - { - do.call(unbiased:::randomize_minimisation_pocock, params) - }, - error = function(e) { - res$status <- 400 - res$body <- glue::glue("Error message: {conditionMessage(e)}") - logger::log_error("Error: {err}", err = e) - } + minimisation_pocock = do.call( + unbiased:::randomize_minimisation_pocock, params ) ) arm <- dplyr::tbl(db_connection_pool, "arm") |> dplyr::filter(study_id == !!study_id & .data$name == arm_name) |> - dplyr::select(arm_id = "id", "name", "ratio") |> + dplyr::select("arm_id" = "id", "name", "ratio") |> dplyr::collect() - unbiased:::save_patient(study_id, arm$arm_id) |> - dplyr::mutate(arm_name = arm$name) |> - dplyr::rename(patient_id = "id") |> - as.list() -} + randomized_patient <- unbiased:::save_patient(study_id, arm$arm_id) -parse_pocock_parameters <- - function(db_connetion_pool, study_id, current_state) { - parameters <- - dplyr::tbl(db_connetion_pool, "study") |> - dplyr::filter(id == study_id) |> - dplyr::select(parameters) |> - dplyr::pull() - - parameters <- jsonlite::fromJSON(parameters) - - if (!checkmate::test_list(parameters, null.ok = FALSE)) { - message <- checkmate::test_list(parameters, null.ok = FALSE) - res$status <- 400 - res$body <- - list( - error = glue::glue( - "Parse validation failed. 
'Parameters' must be a list: {message}" - ) - ) - - return(res) - } - - ratio_arms <- - dplyr::tbl(db_connetion_pool, "arm") |> - dplyr::filter(study_id == !!study_id) |> - dplyr::select("name", "ratio") |> - dplyr::collect() - - params <- list( - arms = ratio_arms$name, - current_state = tibble::as_tibble(current_state), - ratio = setNames(ratio_arms$ratio, ratio_arms$name), - method = parameters$method, - p = parameters$p, - weights = parameters$weights |> unlist() - ) - - if (!checkmate::test_list(params, null.ok = FALSE)) { - message <- checkmate::test_list(params, null.ok = FALSE) - res$status <- 400 - res$body <- - list(error = glue::glue( - "Parse validation failed. Input parameters must be a list: {message}" - )) - return(res) - } - - return(params) + if (!is.null(randomized_patient$error)) { + res$status <- 503 + return(list( + error = "There was a problem saving randomized patient to the database", + details = randomized_patient$error + )) + } else { + randomized_patient <- + randomized_patient |> + dplyr::mutate(arm_name = arm$name) |> + dplyr::rename(patient_id = id) |> + as.list() + + return(randomized_patient) } +} diff --git a/R/db.R b/R/db.R index 8bca246..e169868 100644 --- a/R/db.R +++ b/R/db.R @@ -15,15 +15,23 @@ #' pool <- create_db_connection_pool() #' } create_db_connection_pool <- purrr::insistently(function() { + dbname <- Sys.getenv("POSTGRES_DB") + host <- Sys.getenv("POSTGRES_HOST") + port <- Sys.getenv("POSTGRES_PORT", 5432) + user <- Sys.getenv("POSTGRES_USER") + password <- Sys.getenv("POSTGRES_PASSWORD") + print( + glue::glue("Creating database connection pool to {dbname} at {host}:{port} as {user}") + ) pool::dbPool( RPostgres::Postgres(), - dbname = Sys.getenv("POSTGRES_DB"), - host = Sys.getenv("POSTGRES_HOST"), - port = Sys.getenv("POSTGRES_PORT", 5432), - user = Sys.getenv("POSTGRES_USER"), - password = Sys.getenv("POSTGRES_PASSWORD") + dbname = dbname, + host = host, + port = port, + user = user, + password = password ) -}, rate = purrr::rate_delay(2, max_times = 5)) +}, rate = purrr::rate_delay(1, max_times = 15), quiet = FALSE) get_similar_studies <- function(name, identifier) { @@ -140,14 +148,21 @@ create_study <- function( } save_patient <- function(study_id, arm_id) { - db_connection_pool <- get("db_connection_pool") - randomized_patient <- DBI::dbGetQuery( - db_connection_pool, - "INSERT INTO patient (arm_id, study_id) + r <- tryCatch( + { + randomized_patient <- DBI::dbGetQuery( + db_connection_pool, + "INSERT INTO patient (arm_id, study_id) VALUES ($1, $2) RETURNING id, arm_id", - list(arm_id, study_id) + list(arm_id, study_id) + ) + }, + error = function(cond) { + logger::log_error("Error randomizing patient: {cond}", cond = cond) + list(error = conditionMessage(cond)) + } ) - return(randomized_patient) + return(r) } diff --git a/R/run-api.R b/R/run-api.R index 9030be7..32bf0a8 100644 --- a/R/run-api.R +++ b/R/run-api.R @@ -12,6 +12,15 @@ #' #' @export run_unbiased <- function() { + tryCatch( + { + rlang::global_entrace() + }, + error = function(e) { + message("Error setting up global_entrace, it is expected in testing environment: ", e$message) + } + ) + setup_sentry() host <- Sys.getenv("UNBIASED_HOST", "0.0.0.0") port <- as.integer(Sys.getenv("UNBIASED_PORT", "3838")) assign("db_connection_pool", @@ -38,3 +47,62 @@ run_unbiased <- function() { plumber::pr_run(host = host, port = port) } } + +# hack to make sure we can mock the globalCallingHandlers +# this method needs to be present in the package environment for mocking to work +# linter 
disabled intentionally since this is internal method and cannot be renamed +globalCallingHandlers <- NULL # nolint + +#' setup_sentry function +#' +#' This function is used to configure Sentry, a service for real-time error tracking. +#' It uses the sentryR package to set up Sentry based on environment variables. +#' +#' @param None +#' +#' @return None. If the SENTRY_DSN environment variable is not set, the function will +#' return a message and stop execution. +#' +#' @examples +#' setup_sentry() +#' +#' @details +#' The function first checks if the SENTRY_DSN environment variable is set. If not, it +#' returns a message and stops execution. +#' If SENTRY_DSN is set, it uses the sentryR::configure_sentry function to set up Sentry with +#' the following parameters: +#' - dsn: The Data Source Name (DSN) is retrieved from the SENTRY_DSN environment variable. +#' - app_name: The application name is set to "unbiased". +#' - app_version: The application version is retrieved from the GITHUB_SHA environment variable. +#' If not set, it defaults to "unspecified". +#' - environment: The environment is retrieved from the SENTRY_ENVIRONMENT environment variable. +#' If not set, it defaults to "development". +#' - release: The release is retrieved from the SENTRY_RELEASE environment variable. +#' If not set, it defaults to "unspecified". +#' +#' @seealso \url{https://docs.sentry.io/} +setup_sentry <- function() { + sentry_dsn <- Sys.getenv("SENTRY_DSN") + if (sentry_dsn == "") { + message("SENTRY_DSN not set, skipping Sentry setup") + return() + } + + sentryR::configure_sentry( + dsn = sentry_dsn, + app_name = "unbiased", + app_version = Sys.getenv("GITHUB_SHA", "unspecified"), + environment = Sys.getenv("SENTRY_ENVIRONMENT", "development"), + release = Sys.getenv("SENTRY_RELEASE", "unspecified") + ) + + globalCallingHandlers( + error = global_calling_handler + ) +} + +global_calling_handler <- function(error) { + error$function_calls <- sys.calls() + sentryR::capture_exception(error) + signalCondition(error) +} diff --git a/README.md b/README.md index 0f93ed4..0f4c5a7 100644 --- a/README.md +++ b/README.md @@ -220,3 +220,10 @@ To calculate code coverage, you will need to install the `covr` package. Once in - `covr::package_coverage()`: This method provides a simpler, text-based code coverage report. Alternatively, you can use the provided `run_tests_with_coverage.sh` script to run Unbiased tests with code coverage. + +### Configuring Sentry +The Unbiased server offers robust error reporting capabilities through the integration of the Sentry service. To activate Sentry, simply set the `SENTRY_DSN` environment variable. Additionally, you have the flexibility to customize the setup further by configuring the following environment variables: + +* `SENTRY_ENVIRONMENT` This is used to set the environment (e.g., "production", "staging", "development"). If not set, the environment defaults to "development". + +* `SENTRY_RELEASE` This is used to set the release in Sentry. If not set, the release defaults to "unspecified". 
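For reference, a minimal sketch of how the Sentry-related variables described in the README section above could be set from an R session before starting the API. The DSN shown is a placeholder, not a real project key; in a deployment these variables are normally provided by the container environment rather than set in code.

```r
# Hypothetical local configuration: enable Sentry error reporting for unbiased.
# setup_sentry() (called by run_unbiased() on startup) reads these variables
# via Sys.getenv(); when SENTRY_DSN is unset, Sentry setup is skipped entirely.
Sys.setenv(
  SENTRY_DSN         = "https://examplePublicKey@o0.ingest.sentry.io/0", # placeholder DSN
  SENTRY_ENVIRONMENT = "staging",  # defaults to "development" when unset
  SENTRY_RELEASE     = "v1.2.3"    # defaults to "unspecified" when unset
)

unbiased::run_unbiased()
```

The application version reported to Sentry is taken from `GITHUB_SHA` (falling back to "unspecified"), so CI-built images get release tracking without extra configuration.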
diff --git a/inst/plumber/unbiased_api/meta.R b/inst/plumber/unbiased_api/meta.R index 171e191..09622bf 100644 --- a/inst/plumber/unbiased_api/meta.R +++ b/inst/plumber/unbiased_api/meta.R @@ -6,7 +6,7 @@ #* @tag other #* @get /sha #* @serializer unboxedJSON -function(res) { +sentryR::with_captured_calls(function(req, res) { sha <- Sys.getenv("GITHUB_SHA", unset = "NULL") if (sha == "NULL") { res$status <- 404 @@ -14,4 +14,4 @@ function(res) { } else { return(sha) } -} +}) diff --git a/inst/plumber/unbiased_api/plumber.R b/inst/plumber/unbiased_api/plumber.R index 3f2b07d..06add32 100644 --- a/inst/plumber/unbiased_api/plumber.R +++ b/inst/plumber/unbiased_api/plumber.R @@ -19,8 +19,10 @@ #* #* @plumber function(api) { - meta <- plumber::pr("meta.R") - study <- plumber::pr("study.R") + meta <- plumber::pr("meta.R") |> + plumber::pr_set_error(sentryR::sentry_error_handler) + study <- plumber::pr("study.R") |> + plumber::pr_set_error(sentryR::sentry_error_handler) api |> plumber::pr_mount("/meta", meta) |> diff --git a/inst/plumber/unbiased_api/study.R b/inst/plumber/unbiased_api/study.R index f613b4e..07e7f95 100644 --- a/inst/plumber/unbiased_api/study.R +++ b/inst/plumber/unbiased_api/study.R @@ -18,13 +18,15 @@ #* @post /minimisation_pocock #* @serializer unboxedJSON #* -function(identifier, name, method, arms, covariates, p, req, res) { +sentryR::with_captured_calls(function( + identifier, name, method, arms, covariates, p, req, res +) { return( unbiased:::api__minimization_pocock( identifier, name, method, arms, covariates, p, req, res ) ) -} +}) #* Randomize one patient #* @@ -37,8 +39,8 @@ function(identifier, name, method, arms, covariates, p, req, res) { #* @serializer unboxedJSON #* -function(study_id, current_state, req, res) { +sentryR::with_captured_calls(function(study_id, current_state, req, res) { return( unbiased:::api__randomize_patient(study_id, current_state, req, res) ) -} +}) diff --git a/man/setup_sentry.Rd b/man/setup_sentry.Rd new file mode 100644 index 0000000..8de319a --- /dev/null +++ b/man/setup_sentry.Rd @@ -0,0 +1,42 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run-api.R +\name{setup_sentry} +\alias{setup_sentry} +\title{setup_sentry function} +\usage{ +setup_sentry() +} +\arguments{ +\item{None}{} +} +\value{ +None. If the SENTRY_DSN environment variable is not set, the function will +return a message and stop execution. +} +\description{ +This function is used to configure Sentry, a service for real-time error tracking. +It uses the sentryR package to set up Sentry based on environment variables. +} +\details{ +The function first checks if the SENTRY_DSN environment variable is set. If not, it +returns a message and stops execution. +If SENTRY_DSN is set, it uses the sentryR::configure_sentry function to set up Sentry with +the following parameters: +\itemize{ +\item dsn: The Data Source Name (DSN) is retrieved from the SENTRY_DSN environment variable. +\item app_name: The application name is set to "unbiased". +\item app_version: The application version is retrieved from the GITHUB_SHA environment variable. +If not set, it defaults to "unspecified". +\item environment: The environment is retrieved from the SENTRY_ENVIRONMENT environment variable. +If not set, it defaults to "development". +\item release: The release is retrieved from the SENTRY_RELEASE environment variable. +If not set, it defaults to "unspecified". 
+} +} +\examples{ +setup_sentry() + +} +\seealso{ +\url{https://docs.sentry.io/} +} diff --git a/renv.lock b/renv.lock index f303d39..c4ecca1 100644 --- a/renv.lock +++ b/renv.lock @@ -9,6 +9,13 @@ ] }, "Packages": { + "BH": { + "Package": "BH", + "Version": "1.81.0-1", + "Source": "Repository", + "Repository": "RSPM", + "Hash": "68122010f01c4dcfbe58ce7112f2433d" + }, "DBI": { "Package": "DBI", "Version": "1.2.0", @@ -20,6 +27,47 @@ ], "Hash": "3e0051431dff9acfe66c23765e55c556" }, + "MASS": { + "Package": "MASS", + "Version": "7.3-57", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "grDevices", + "graphics", + "methods", + "stats", + "utils" + ], + "Hash": "71476c1d88d1ebdf31580e5a257d5d31" + }, + "Matrix": { + "Package": "Matrix", + "Version": "1.4-1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "graphics", + "grid", + "lattice", + "methods", + "stats", + "utils" + ], + "Hash": "699c47c606293bdfbc9fd78a93c9c8fe" + }, + "PwrGSD": { + "Package": "PwrGSD", + "Version": "2.3.6", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "survival" + ], + "Hash": "c26126e59b9b078953521379ee219a05" + }, "R6": { "Package": "R6", "Version": "2.5.1", @@ -30,6 +78,16 @@ ], "Hash": "470851b6d5d0ac559e9d01bb352b4021" }, + "RColorBrewer": { + "Package": "RColorBrewer", + "Version": "1.1-3", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R" + ], + "Hash": "45f0398006e83a5b10b72a90663d8d8c" + }, "RPostgres": { "Package": "RPostgres", "Version": "1.4.6", @@ -60,6 +118,58 @@ ], "Hash": "ae6cbbe1492f4de79c45fce06f967ce8" }, + "RcppArmadillo": { + "Package": "RcppArmadillo", + "Version": "0.12.6.6.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "Rcpp", + "methods", + "stats", + "utils" + ], + "Hash": "d2b60e0a15d73182a3a766ff0a7d0d7f" + }, + "RcppEigen": { + "Package": "RcppEigen", + "Version": "0.3.3.9.4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "Rcpp", + "stats", + "utils" + ], + "Hash": "acb0a5bf38490f26ab8661b467f4f53a" + }, + "TH.data": { + "Package": "TH.data", + "Version": "1.1-2", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "MASS", + "R", + "survival" + ], + "Hash": "5b250ad4c5863ee4a68e280fcb0a3600" + }, + "V8": { + "Package": "V8", + "Version": "4.4.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "Rcpp", + "curl", + "jsonlite", + "utils" + ], + "Hash": "435359b59b8a9b8f9235135da471ea3c" + }, "askpass": { "Package": "askpass", "Version": "1.2.0", @@ -90,6 +200,47 @@ ], "Hash": "543776ae6848fde2f48ff3816d0628bc" }, + "bigD": { + "Package": "bigD", + "Version": "0.2.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R" + ], + "Hash": "93637e906f3fe962413912c956eb44db" + }, + "bigmemory": { + "Package": "bigmemory", + "Version": "4.6.2", + "Source": "GitHub", + "RemoteType": "github", + "RemoteHost": "api.github.com", + "RemoteRepo": "bigmemory", + "RemoteUsername": "kaneplusplus", + "RemoteRef": "HEAD", + "RemoteSha": "3064277f4a83b74490464ea4ac5a43f76e426ada", + "Requirements": [ + "BH", + "R", + "Rcpp", + "bigmemory.sri", + "methods", + "utils", + "uuid" + ], + "Hash": "65fe01c6e8e22c8bd0c6f5b5e3ccf19e" + }, + "bigmemory.sri": { + "Package": "bigmemory.sri", + "Version": "0.1.6", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "methods" + ], + "Hash": "cd3e474a907284c598e60417a5edeb79" + }, "bit": { "Package": "bit", 
"Version": "4.0.5", @@ -114,6 +265,13 @@ ], "Hash": "9fe98599ca456d6552421db0d6772d8f" }, + "bitops": { + "Package": "bitops", + "Version": "1.0-7", + "Source": "Repository", + "Repository": "RSPM", + "Hash": "b7d8d8ee39869c18d8846a184dd8a1af" + }, "blob": { "Package": "blob", "Version": "1.2.4", @@ -140,6 +298,48 @@ "Repository": "RSPM", "Hash": "976cf154dfb043c012d87cddd8bca363" }, + "broom": { + "Package": "broom", + "Version": "1.0.5", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "backports", + "dplyr", + "ellipsis", + "generics", + "glue", + "lifecycle", + "purrr", + "rlang", + "stringr", + "tibble", + "tidyr" + ], + "Hash": "fd25391c3c4f6ecf0fa95f1e6d15378c" + }, + "broom.helpers": { + "Package": "broom.helpers", + "Version": "1.14.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "broom", + "cli", + "dplyr", + "labelled", + "lifecycle", + "purrr", + "rlang", + "stats", + "stringr", + "tibble", + "tidyr" + ], + "Hash": "ea30eb5d9412a4a5c2740685f680cd49" + }, "bslib": { "Package": "bslib", "Version": "0.6.1", @@ -197,6 +397,19 @@ ], "Hash": "ca9c113196136f4a9ca9ce6079c2c99e" }, + "class": { + "Package": "class", + "Version": "7.3-20", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "MASS", + "R", + "stats", + "utils" + ], + "Hash": "da09d82223e669d270e47ed24ac8686e" + }, "cli": { "Package": "cli", "Version": "3.6.1", @@ -218,6 +431,51 @@ ], "Hash": "3f038e5ac7f41d4ac41ce658c85e3042" }, + "codetools": { + "Package": "codetools", + "Version": "0.2-19", + "Source": "Repository", + "Repository": "CRAN", + "Requirements": [ + "R" + ], + "Hash": "c089a619a7fae175d149d89164f8c7d8" + }, + "coin": { + "Package": "coin", + "Version": "1.4-3", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "libcoin", + "matrixStats", + "methods", + "modeltools", + "multcomp", + "mvtnorm", + "parallel", + "stats", + "stats4", + "survival", + "utils" + ], + "Hash": "4084b5070a40ad99dad581ed3b67bd55" + }, + "colorspace": { + "Package": "colorspace", + "Version": "2.1-0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "grDevices", + "graphics", + "methods", + "stats" + ], + "Hash": "f20c47fd52fae58b4e377c37bb8c335b" + }, "commonmark": { "Package": "commonmark", "Version": "1.9.0", @@ -271,6 +529,17 @@ ], "Hash": "9123f3ef96a2c1a93927d828b2fe7d4c" }, + "data.table": { + "Package": "data.table", + "Version": "1.14.10", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "methods" + ], + "Hash": "6ea17a32294d8ca00455825ab0cf71b9" + }, "dbplyr": { "Package": "dbplyr", "Version": "2.4.0", @@ -416,6 +685,22 @@ ], "Hash": "e85ffbebaad5f70e1a2e2ef4302b4949" }, + "e1071": { + "Package": "e1071", + "Version": "1.7-14", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "class", + "grDevices", + "graphics", + "methods", + "proxy", + "stats", + "utils" + ], + "Hash": "4ef372b716824753719a8a38b258442d" + }, "ellipsis": { "Package": "ellipsis", "Version": "0.3.2", @@ -450,6 +735,27 @@ ], "Hash": "3e8583a60163b4bc1a80016e63b9959e" }, + "farver": { + "Package": "farver", + "Version": "2.1.1", + "Source": "Repository", + "Repository": "RSPM", + "Hash": "8106d78941f34855c440ddb946b8f7a5" + }, + "fastglm": { + "Package": "fastglm", + "Version": "0.0.3", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "BH", + "Rcpp", + "RcppEigen", + "bigmemory", + "methods" + ], + "Hash": "e0f222ad320efdaa48ebf88eb576bb21" + }, 
"fastmap": { "Package": "fastmap", "Version": "1.1.1", @@ -469,6 +775,22 @@ ], "Hash": "c2efdd5f0bcd1ea861c2d4e2a883a67d" }, + "forcats": { + "Package": "forcats", + "Version": "1.0.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "cli", + "glue", + "lifecycle", + "magrittr", + "rlang", + "tibble" + ], + "Hash": "1a0a9a3d5083d0d573c4214576f1e690" + }, "fs": { "Package": "fs", "Version": "1.6.3", @@ -480,6 +802,19 @@ ], "Hash": "47b5f30c720c23999b913a1a635cf0bb" }, + "gdata": { + "Package": "gdata", + "Version": "3.0.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "gtools", + "methods", + "stats", + "utils" + ], + "Hash": "d3d6e4c174b8a5f251fd273f245f2471" + }, "generics": { "Package": "generics", "Version": "0.1.3", @@ -506,6 +841,31 @@ ], "Hash": "f70d3fe2d9e7654213a946963d1591eb" }, + "ggplot2": { + "Package": "ggplot2", + "Version": "3.4.4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "MASS", + "R", + "cli", + "glue", + "grDevices", + "grid", + "gtable", + "isoband", + "lifecycle", + "mgcv", + "rlang", + "scales", + "stats", + "tibble", + "vctrs", + "withr" + ], + "Hash": "313d31eff2274ecf4c1d3581db7241f9" + }, "gh": { "Package": "gh", "Version": "1.4.0", @@ -543,6 +903,144 @@ ], "Hash": "e0b3a53876554bd45879e596cdb10a52" }, + "gmodels": { + "Package": "gmodels", + "Version": "2.18.1.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "MASS", + "R", + "gdata" + ], + "Hash": "6713a242cb6909e492d8169a35dfe0b0" + }, + "gsDesign": { + "Package": "gsDesign", + "Version": "3.6.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "dplyr", + "ggplot2", + "graphics", + "gt", + "magrittr", + "methods", + "r2rtf", + "rlang", + "stats", + "tibble", + "tidyr", + "tools", + "xtable" + ], + "Hash": "496b38bfc6524e1a1fc04220da550892" + }, + "gt": { + "Package": "gt", + "Version": "0.10.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "base64enc", + "bigD", + "bitops", + "cli", + "commonmark", + "dplyr", + "fs", + "glue", + "htmltools", + "htmlwidgets", + "juicyjuice", + "magrittr", + "markdown", + "reactable", + "rlang", + "sass", + "scales", + "tibble", + "tidyselect", + "xml2" + ], + "Hash": "21737c74811cccac01b5097bcb0f8b4c" + }, + "gtable": { + "Package": "gtable", + "Version": "0.3.4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "cli", + "glue", + "grid", + "lifecycle", + "rlang" + ], + "Hash": "b29cf3031f49b04ab9c852c912547eef" + }, + "gtools": { + "Package": "gtools", + "Version": "3.9.5", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "methods", + "stats", + "utils" + ], + "Hash": "588d091c35389f1f4a9d533c8d709b35" + }, + "gtsummary": { + "Package": "gtsummary", + "Version": "1.7.2", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "broom", + "broom.helpers", + "cli", + "dplyr", + "forcats", + "glue", + "gt", + "knitr", + "lifecycle", + "purrr", + "rlang", + "stringr", + "tibble", + "tidyr", + "vctrs" + ], + "Hash": "08df7405a102e3f0bdf7a13a29e8c6ab" + }, + "haven": { + "Package": "haven", + "Version": "2.5.4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "cli", + "cpp11", + "forcats", + "hms", + "lifecycle", + "methods", + "readr", + "rlang", + "tibble", + "tidyselect", + "vctrs" + ], + "Hash": "9171f898db9d9c4c1b2c745adc2c1ef1" + }, "highr": { "Package": "highr", "Version": "0.10", @@ -658,6 
+1156,30 @@ "Repository": "RSPM", "Hash": "6154ec2223172bce8162d4153cda21f7" }, + "insight": { + "Package": "insight", + "Version": "0.19.8", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "methods", + "stats", + "utils" + ], + "Hash": "adcc19435135a4d211e5aa2e48e4f6b7" + }, + "isoband": { + "Package": "isoband", + "Version": "0.2.7", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "grid", + "utils" + ], + "Hash": "0080607b4a1a7b28979aecef976d8bc2" + }, "jquerylib": { "Package": "jquerylib", "Version": "0.1.4", @@ -678,6 +1200,16 @@ ], "Hash": "e1b9c55281c5adc4dd113652d9e26768" }, + "juicyjuice": { + "Package": "juicyjuice", + "Version": "0.1.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "V8" + ], + "Hash": "3bcd11943da509341838da9399e18bce" + }, "knitr": { "Package": "knitr", "Version": "1.45", @@ -694,6 +1226,34 @@ ], "Hash": "1ec462871063897135c1bcbe0fc8f07d" }, + "labeling": { + "Package": "labeling", + "Version": "0.4.3", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "graphics", + "stats" + ], + "Hash": "b64ec208ac5bc1852b285f665d6368b3" + }, + "labelled": { + "Package": "labelled", + "Version": "2.12.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "dplyr", + "haven", + "lifecycle", + "rlang", + "stringr", + "tidyr", + "vctrs" + ], + "Hash": "1ec27c624ece6c20431e9249bd232797" + }, "later": { "Package": "later", "Version": "1.3.1", @@ -705,6 +1265,33 @@ ], "Hash": "40401c9cf2bc2259dfe83311c9384710" }, + "lattice": { + "Package": "lattice", + "Version": "0.20-45", + "Source": "Repository", + "Repository": "CRAN", + "Requirements": [ + "R", + "grDevices", + "graphics", + "grid", + "stats", + "utils" + ], + "Hash": "b64cdbb2b340437c4ee047a1f4c4377b" + }, + "libcoin": { + "Package": "libcoin", + "Version": "1.0-10", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "mvtnorm", + "stats" + ], + "Hash": "3f3775a14588ff5d013e5eab4453bf28" + }, "lifecycle": { "Package": "lifecycle", "Version": "1.0.3", @@ -739,57 +1326,220 @@ "methods", "timechange" ], - "Hash": "680ad542fbcf801442c83a6ac5a2126c" + "Hash": "680ad542fbcf801442c83a6ac5a2126c" + }, + "magrittr": { + "Package": "magrittr", + "Version": "2.0.3", + "Source": "Repository", + "Repository": "CRAN", + "Requirements": [ + "R" + ], + "Hash": "7ce2733a9826b3aeb1775d56fd305472" + }, + "markdown": { + "Package": "markdown", + "Version": "1.12", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "commonmark", + "utils", + "xfun" + ], + "Hash": "765cf53992401b3b6c297b69e1edb8bd" + }, + "mathjaxr": { + "Package": "mathjaxr", + "Version": "1.6-0", + "Source": "Repository", + "Repository": "RSPM", + "Hash": "87da6ccdcee6077a7d5719406bf3ae45" + }, + "matrixStats": { + "Package": "matrixStats", + "Version": "1.2.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R" + ], + "Hash": "33a3ca9e732b57244d14f5d732ffc9eb" + }, + "memoise": { + "Package": "memoise", + "Version": "2.0.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "cachem", + "rlang" + ], + "Hash": "e2817ccf4a065c5d9d7f2cfbe7c1d78c" + }, + "mgcv": { + "Package": "mgcv", + "Version": "1.8-40", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "Matrix", + "R", + "graphics", + "methods", + "nlme", + "splines", + "stats", + "utils" + ], + "Hash": "c6b2fdb18cf68ab613bd564363e1ba0d" + }, + "mime": { + "Package": "mime", + 
"Version": "0.12", + "Source": "Repository", + "Repository": "CRAN", + "Requirements": [ + "tools" + ], + "Hash": "18e9c28c1d3ca1560ce30658b22ce104" + }, + "miniUI": { + "Package": "miniUI", + "Version": "0.1.1.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "htmltools", + "shiny", + "utils" + ], + "Hash": "fec5f52652d60615fdb3957b3d74324a" + }, + "minqa": { + "Package": "minqa", + "Version": "1.2.6", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "Rcpp" + ], + "Hash": "f48238f8d4740426ca12f53f27d004dd" + }, + "mitools": { + "Package": "mitools", + "Version": "2.4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "DBI", + "methods", + "stats" + ], + "Hash": "a4b659bd0528226724d55034f11ed7cb" + }, + "modeltools": { + "Package": "modeltools", + "Version": "0.2-23", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "methods", + "stats", + "stats4" + ], + "Hash": "f5a957c02222589bdf625a67be68b2a9" + }, + "mstate": { + "Package": "mstate", + "Version": "0.3.2", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "RColorBrewer", + "data.table", + "lattice", + "rlang", + "survival", + "viridisLite" + ], + "Hash": "53ca2f4a1ab4ac93fec33c92dc22c886" + }, + "multcomp": { + "Package": "multcomp", + "Version": "1.4-25", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "TH.data", + "codetools", + "graphics", + "mvtnorm", + "sandwich", + "stats", + "survival" + ], + "Hash": "2688bf2f8d54c19534ee7d8a876d9fc7" }, - "magrittr": { - "Package": "magrittr", - "Version": "2.0.3", + "munsell": { + "Package": "munsell", + "Version": "0.5.0", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ - "R" + "colorspace", + "methods" ], - "Hash": "7ce2733a9826b3aeb1775d56fd305472" + "Hash": "6dfe8bf774944bd5595785e3229d8771" }, - "mathjaxr": { - "Package": "mathjaxr", - "Version": "1.6-0", + "mvnfast": { + "Package": "mvnfast", + "Version": "0.2.8", "Source": "Repository", "Repository": "RSPM", - "Hash": "87da6ccdcee6077a7d5719406bf3ae45" + "Requirements": [ + "BH", + "Rcpp", + "RcppArmadillo" + ], + "Hash": "e65cac8e8501bdfbdca0412c37bb18c9" }, - "memoise": { - "Package": "memoise", - "Version": "2.0.1", + "mvtnorm": { + "Package": "mvtnorm", + "Version": "1.2-4", "Source": "Repository", "Repository": "RSPM", "Requirements": [ - "cachem", - "rlang" + "R", + "stats" ], - "Hash": "e2817ccf4a065c5d9d7f2cfbe7c1d78c" + "Hash": "17e96668f44a28aef0981d9e17c49b59" }, - "mime": { - "Package": "mime", - "Version": "0.12", + "nlme": { + "Package": "nlme", + "Version": "3.1-162", "Source": "Repository", "Repository": "CRAN", "Requirements": [ - "tools" + "R", + "graphics", + "lattice", + "stats", + "utils" ], - "Hash": "18e9c28c1d3ca1560ce30658b22ce104" + "Hash": "0984ce8da8da9ead8643c5cbbb60f83e" }, - "miniUI": { - "Package": "miniUI", - "Version": "0.1.1.1", + "numDeriv": { + "Package": "numDeriv", + "Version": "2016.8-1.1", "Source": "Repository", "Repository": "RSPM", "Requirements": [ - "htmltools", - "shiny", - "utils" + "R" ], - "Hash": "fec5f52652d60615fdb3957b3d74324a" + "Hash": "df58958f293b166e4ab885ebcad90e02" }, "openssl": { "Package": "openssl", @@ -801,6 +1551,18 @@ ], "Hash": "2a0dc8c6adfb6f032e4d4af82d258ab5" }, + "pbv": { + "Package": "pbv", + "Version": "0.5-47", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "Rcpp", + "RcppArmadillo" + ], + "Hash": "b0fa64575651e261cfa1fdb46025cb44" + }, "pillar": { 
"Package": "pillar", "Version": "1.9.0", @@ -904,6 +1666,20 @@ "Repository": "RSPM", "Hash": "09eb987710984fc2905c7129c7d85e65" }, + "plotrix": { + "Package": "plotrix", + "Version": "3.8-4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "grDevices", + "graphics", + "stats", + "utils" + ], + "Hash": "d47fdfc45aeba360ce9db50643de3fbd" + }, "plumber": { "Package": "plumber", "Version": "1.2.1", @@ -928,6 +1704,17 @@ ], "Hash": "8b65a7a00ef8edc5ddc6fabf0aff1194" }, + "plyr": { + "Package": "plyr", + "Version": "1.8.9", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "Rcpp" + ], + "Hash": "6b8177fd19982f0020743fadbfdbd933" + }, "pool": { "Package": "pool", "Version": "1.0.1", @@ -989,6 +1776,20 @@ ], "Hash": "aa5a3864397ce6ae03458f98618395a1" }, + "progress": { + "Package": "progress", + "Version": "1.2.3", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "R6", + "crayon", + "hms", + "prettyunits" + ], + "Hash": "f4625e061cb2865f111b47ff163a5ca6" + }, "promises": { "Package": "promises", "Version": "1.2.1", @@ -1005,6 +1806,18 @@ ], "Hash": "0d8a15c9d000970ada1ab21405387dee" }, + "proxy": { + "Package": "proxy", + "Version": "0.4-27", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "stats", + "utils" + ], + "Hash": "e0ef355c12942cf7a6b91a6cfaea8b3e" + }, "ps": { "Package": "ps", "Version": "1.7.5", @@ -1031,6 +1844,18 @@ ], "Hash": "1cba04a4e9414bdefc9dcaa99649a8dc" }, + "r2rtf": { + "Package": "r2rtf", + "Version": "1.1.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "grDevices", + "tools" + ], + "Hash": "807989b4dccfab6440841a5e8aaa95f1" + }, "ragg": { "Package": "ragg", "Version": "1.2.7", @@ -1042,6 +1867,31 @@ ], "Hash": "90a1b8b7e518d7f90480d56453b4d062" }, + "randomizeR": { + "Package": "randomizeR", + "Version": "3.0.2", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "PwrGSD", + "R", + "coin", + "dplyr", + "ggplot2", + "gsDesign", + "insight", + "magrittr", + "methods", + "mstate", + "mvtnorm", + "plotrix", + "purrr", + "reshape2", + "rlang", + "survival" + ], + "Hash": "d22309ab2b609eb233d4b2e931dad265" + }, "rappdirs": { "Package": "rappdirs", "Version": "0.3.3", @@ -1074,6 +1924,54 @@ ], "Hash": "8f25ebe2ec38b1f2aef3b0d2ef76f6c4" }, + "reactR": { + "Package": "reactR", + "Version": "0.5.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "htmltools" + ], + "Hash": "c9014fd1a435b2d790dd506589cb24e5" + }, + "reactable": { + "Package": "reactable", + "Version": "0.4.4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "digest", + "htmltools", + "htmlwidgets", + "jsonlite", + "reactR" + ], + "Hash": "6069eb2a6597963eae0605c1875ff14c" + }, + "readr": { + "Package": "readr", + "Version": "2.1.4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "R6", + "cli", + "clipr", + "cpp11", + "crayon", + "hms", + "lifecycle", + "methods", + "rlang", + "tibble", + "tzdb", + "utils", + "vroom" + ], + "Hash": "b5047343b3825f37ad9d3b5d89aa1078" + }, "rematch2": { "Package": "rematch2", "Version": "2.1.2", @@ -1108,6 +2006,19 @@ ], "Hash": "c321cd99d56443dbffd1c9e673c0c1a2" }, + "reshape2": { + "Package": "reshape2", + "Version": "1.4.4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "Rcpp", + "plyr", + "stringr" + ], + "Hash": "bb5996d0bd962d214a11140d77589917" + }, "rlang": { "Package": "rlang", 
"Version": "1.1.3", @@ -1198,6 +2109,19 @@ ], "Hash": "a9881dfed103e83f9de151dc17002cd1" }, + "sandwich": { + "Package": "sandwich", + "Version": "3.1-0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "stats", + "utils", + "zoo" + ], + "Hash": "1cf6ae532f0179350862fefeb0987c9b" + }, "sass": { "Package": "sass", "Version": "0.4.8", @@ -1212,6 +2136,41 @@ ], "Hash": "168f9353c76d4c4b0a0bbf72e2c2d035" }, + "scales": { + "Package": "scales", + "Version": "1.3.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "R6", + "RColorBrewer", + "cli", + "farver", + "glue", + "labeling", + "lifecycle", + "munsell", + "rlang", + "viridisLite" + ], + "Hash": "c19df082ba346b0ffa6f833e92de34d1" + }, + "sentryR": { + "Package": "sentryR", + "Version": "1.1.2", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "httr", + "jsonlite", + "stats", + "stringr", + "tibble", + "uuid" + ], + "Hash": "f37e91d605fbf665d7b5467ded4e539e" + }, "sessioninfo": { "Package": "sessioninfo", "Version": "1.2.2", @@ -1259,6 +2218,24 @@ ], "Hash": "3a1f41807d648a908e3c7f0334bf85e6" }, + "simstudy": { + "Package": "simstudy", + "Version": "0.7.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "Rcpp", + "backports", + "data.table", + "fastglm", + "glue", + "methods", + "mvnfast", + "pbv" + ], + "Hash": "deb66424ac81e3aa78066791e0e6b97f" + }, "sodium": { "Package": "sodium", "Version": "1.3.0", @@ -1306,6 +2283,43 @@ ], "Hash": "671a4d384ae9d32fc47a14e98bfa3dc8" }, + "survey": { + "Package": "survey", + "Version": "4.2-1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "Matrix", + "R", + "graphics", + "grid", + "lattice", + "methods", + "minqa", + "mitools", + "numDeriv", + "splines", + "stats", + "survival" + ], + "Hash": "03195177db81a992f22361f8f54852f4" + }, + "survival": { + "Package": "survival", + "Version": "3.3-1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "Matrix", + "R", + "graphics", + "methods", + "splines", + "stats", + "utils" + ], + "Hash": "f6189c70451d3d68e0d571235576e833" + }, "swagger": { "Package": "swagger", "Version": "3.33.1", @@ -1331,6 +2345,22 @@ ], "Hash": "15b594369e70b975ba9f064295983499" }, + "tableone": { + "Package": "tableone", + "Version": "0.13.2", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "MASS", + "e1071", + "gmodels", + "labelled", + "nlme", + "survey", + "zoo" + ], + "Hash": "b1a77da61a4c3585987241b8a1cc6b95" + }, "testthat": { "Package": "testthat", "Version": "3.2.1", @@ -1451,6 +2481,27 @@ ], "Hash": "5ac22900ae0f386e54f1c307eca7d843" }, + "truncnorm": { + "Package": "truncnorm", + "Version": "1.0-9", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R" + ], + "Hash": "ef5b32c5194351ff409dfb37ca9468f1" + }, + "tzdb": { + "Package": "tzdb", + "Version": "0.4.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "cpp11" + ], + "Hash": "f561504ec2897f4d46f0c7657e488ae1" + }, "urlchecker": { "Package": "urlchecker", "Version": "1.0.1", @@ -1506,6 +2557,16 @@ ], "Hash": "1fe17157424bb09c48a8b3b550c753bc" }, + "uuid": { + "Package": "uuid", + "Version": "1.1-1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R" + ], + "Hash": "3d78edfb977a69fc7a0341bee25e163f" + }, "vctrs": { "Package": "vctrs", "Version": "0.6.4", @@ -1520,6 +2581,42 @@ ], "Hash": "266c1ca411266ba8f365fcc726444b87" }, + "viridisLite": { + "Package": 
"viridisLite", + "Version": "0.4.2", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R" + ], + "Hash": "c826c7c4241b6fc89ff55aaea3fa7491" + }, + "vroom": { + "Package": "vroom", + "Version": "1.6.5", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "bit64", + "cli", + "cpp11", + "crayon", + "glue", + "hms", + "lifecycle", + "methods", + "progress", + "rlang", + "stats", + "tibble", + "tidyselect", + "tzdb", + "vctrs", + "withr" + ], + "Hash": "390f9315bc0025be03012054103d227c" + }, "waldo": { "Package": "waldo", "Version": "0.5.1", @@ -1627,6 +2724,21 @@ "Source": "Repository", "Repository": "RSPM", "Hash": "fcc4bd8e6da2d2011eb64a5e5cc685ab" + }, + "zoo": { + "Package": "zoo", + "Version": "1.8-12", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "grDevices", + "graphics", + "lattice", + "stats", + "utils" + ], + "Hash": "5c715954112b45499fb1dadc6ee6ee3e" } } } diff --git a/start_unbiased_api.sh b/start_unbiased_api.sh old mode 100644 new mode 100755 diff --git a/tests/testthat/setup-testing-environment.R b/tests/testthat/setup-testing-environment.R index c09cd7b..bc06c31 100644 --- a/tests/testthat/setup-testing-environment.R +++ b/tests/testthat/setup-testing-environment.R @@ -131,6 +131,10 @@ setup_test_db_connection_pool <- function(envir = parent.frame()) { ) } +# Make sure to disable Sentry during testing +withr::local_envvar( + SENTRY_DSN = NULL +) # We will always run the API on the localhost # and on a random port @@ -219,6 +223,16 @@ withr::local_envvar( ) ) +stdout_file <- withr::local_tempfile( + fileext = ".log", + .local_envir = teardown_env() +) + +stderr_file <- withr::local_tempfile( + fileext = ".log", + .local_envir = teardown_env() +) + plumber_process <- callr::r_bg( \() { if (!requireNamespace("unbiased", quietly = TRUE)) { @@ -232,19 +246,19 @@ plumber_process <- callr::r_bg( unbiased:::run_unbiased() }, - supervise = TRUE + supervise = TRUE, + stdout = stdout_file, + stderr = stderr_file, ) withr::defer( { print("Server STDOUT:") - while (length(lines <- plumber_process$read_output_lines())) { - writeLines(lines) - } + lines <- readLines(stdout_file) + writeLines(lines) print("Server STDERR:") - while (length(lines <- plumber_process$read_error_lines())) { - writeLines(lines) - } + lines <- readLines(stderr_file) + writeLines(lines) print("Sending SIGINT to plumber process") plumber_process$interrupt() diff --git a/tests/testthat/test-DB-study.R b/tests/testthat/test-DB-study.R index ca474cb..54c05a5 100644 --- a/tests/testthat/test-DB-study.R +++ b/tests/testthat/test-DB-study.R @@ -5,10 +5,10 @@ pool <- get("db_connection_pool", envir = globalenv()) test_that("it is enough to provide a name, an identifier, and a method id", { conn <- pool::localCheckout(pool) with_db_fixtures("fixtures/example_study.yml") - expect_no_error({ - tbl(conn, "study") |> - rows_append( - tibble( + testthat::expect_no_error({ + dplyr::tbl(conn, "study") |> + dplyr::rows_append( + tibble::tibble( identifier = "FINE", name = "Correctly working study", method = "minimisation_pocock" @@ -19,25 +19,25 @@ test_that("it is enough to provide a name, an identifier, and a method id", { }) # first study id is 1 -new_study_id <- 1 |> as.integer() +new_study_id <- as.integer(1) test_that("deleting archivizes a study", { conn <- pool::localCheckout(pool) with_db_fixtures("fixtures/example_study.yml") - expect_no_error({ - tbl(conn, "study") |> - rows_delete( - tibble(id = new_study_id), + testthat::expect_no_error({ 
+ dplyr::tbl(conn, "study") |> + dplyr::rows_delete( + tibble::tibble(id = new_study_id), copy = TRUE, in_place = TRUE, unmatched = "ignore" ) }) - expect_identical( - tbl(conn, "study_history") |> - filter(id == new_study_id) |> - select(-parameters, -sys_period, -timestamp) |> - collect(), - tibble( + testthat::expect_identical( + dplyr::tbl(conn, "study_history") |> + dplyr::filter(id == new_study_id) |> + dplyr::select(-parameters, -sys_period, -timestamp) |> + dplyr::collect(), + tibble::tibble( id = new_study_id, identifier = "TEST", name = "Test Study", @@ -49,11 +49,11 @@ test_that("deleting archivizes a study", { test_that("can't push arm with negative ratio", { conn <- pool::localCheckout(pool) with_db_fixtures("fixtures/example_study.yml") - expect_error( + testthat::expect_error( { - tbl(conn, "arm") |> - rows_append( - tibble( + dplyr::tbl(conn, "arm") |> + dplyr::rows_append( + tibble::tibble( study_id = 1, name = "Exception-throwing arm", ratio = -1 @@ -68,7 +68,7 @@ test_that("can't push arm with negative ratio", { test_that("can't push stratum other than factor or numeric", { conn <- pool::localCheckout(pool) with_db_fixtures("fixtures/example_study.yml") - expect_error( + testthat::expect_error( { tbl(conn, "stratum") |> rows_append( @@ -89,10 +89,10 @@ test_that("can't push stratum level outside of defined levels", { with_db_fixtures("fixtures/example_study.yml") # create a new patient return <- - expect_no_error({ - tbl(conn, "patient") |> - rows_append( - tibble( + testthat::expect_no_error({ + dplyr::tbl(conn, "patient") |> + dplyr::rows_append( + tibble::tibble( study_id = 1, arm_id = 1, used = TRUE @@ -104,11 +104,11 @@ test_that("can't push stratum level outside of defined levels", { added_patient_id <- return$id - expect_error( + testthat::expect_error( { - tbl(conn, "patient_stratum") |> - rows_append( - tibble( + dplyr::tbl(conn, "patient_stratum") |> + dplyr::rows_append( + tibble::tibble( patient_id = added_patient_id, stratum_id = 1, fct_value = "Female" @@ -120,10 +120,10 @@ test_that("can't push stratum level outside of defined levels", { ) # add legal value - expect_no_error({ - tbl(conn, "patient_stratum") |> - rows_append( - tibble( + testthat::expect_no_error({ + dplyr::tbl(conn, "patient_stratum") |> + dplyr::rows_append( + tibble::tibble( patient_id = added_patient_id, stratum_id = 1, fct_value = "F" @@ -136,12 +136,12 @@ test_that("can't push stratum level outside of defined levels", { test_that("numerical constraints are enforced", { conn <- pool::localCheckout(pool) with_db_fixtures("fixtures/example_study.yml") - added_patient_id <- 1 |> as.integer() + added_patient_id <- as.integer(1) return <- - expect_no_error({ - tbl(conn, "stratum") |> - rows_append( - tibble( + testthat::expect_no_error({ + dplyr::tbl(conn, "stratum") |> + dplyr::rows_append( + tibble::tibble( study_id = 1, name = "age", value_type = "numeric" @@ -153,10 +153,10 @@ test_that("numerical constraints are enforced", { added_stratum_id <- return$id - expect_no_error({ - tbl(conn, "numeric_constraint") |> - rows_append( - tibble( + testthat::expect_no_error({ + dplyr::tbl(conn, "numeric_constraint") |> + dplyr::rows_append( + tibble::tibble( stratum_id = added_stratum_id, min_value = 18, max_value = 64 @@ -166,11 +166,11 @@ test_that("numerical constraints are enforced", { }) # and you can't add an illegal value - expect_error( + testthat::expect_error( { - tbl(conn, "patient_stratum") |> - rows_append( - tibble( + dplyr::tbl(conn, "patient_stratum") |> + dplyr::rows_append( + 
tibble::tibble( patient_id = added_patient_id, stratum_id = added_stratum_id, num_value = 16 @@ -182,10 +182,10 @@ test_that("numerical constraints are enforced", { ) # you can add valid value - expect_no_error({ - tbl(conn, "patient_stratum") |> - rows_append( - tibble( + testthat::expect_no_error({ + dplyr::tbl(conn, "patient_stratum") |> + dplyr::rows_append( + dplyr::tibble( patient_id = added_patient_id, stratum_id = added_stratum_id, num_value = 23 @@ -195,11 +195,11 @@ test_that("numerical constraints are enforced", { }) # but you cannot add two values for one patient one stratum - expect_error( + testthat::expect_error( { - tbl(conn, "patient_stratum") |> - rows_append( - tibble( + dplyr::tbl(conn, "patient_stratum") |> + dplyr::rows_append( + tibble::tibble( patient_id = added_patient_id, stratum_id = added_stratum_id, num_value = 24 diff --git a/tests/testthat/test-E2E-study-minimisation-pocock.R b/tests/testthat/test-E2E-study-minimisation-pocock.R index c366092..3ba3e19 100644 --- a/tests/testthat/test-E2E-study-minimisation-pocock.R +++ b/tests/testthat/test-E2E-study-minimisation-pocock.R @@ -1,4 +1,6 @@ -test_that("endpoint returns the study id, can randomize 2 patients", { +pool <- get("db_connection_pool", envir = globalenv()) + +test_that("correct request with the structure of the returned result", { response <- request(api_url) |> req_url_path("study", "minimisation_pocock") |> req_method("POST") |> @@ -25,6 +27,7 @@ test_that("endpoint returns the study id, can randomize 2 patients", { ) ) |> req_perform() + response_body <- response |> resp_body_json() @@ -46,26 +49,92 @@ test_that("endpoint returns the study id, can randomize 2 patients", { ) ) |> req_perform() + response_patient_body <- response_patient |> resp_body_json() testthat::expect_equal(response$status_code, 200) - expect_number(response_patient_body$patient_id, lower = 1) + checkmate::expect_number(response_patient_body$patient_id, lower = 1) # Endpoint Response Structure Test checkmate::expect_names( names(response_patient_body), identical.to = c("patient_id", "arm_id", "arm_name") ) + checkmate::expect_list( response_patient_body, any.missing = TRUE, null.ok = FALSE, - len = 3, type = c("numeric", "numeric", "character") + len = 3, + type = c("numeric", "numeric", "character") ) +}) + +test_that("request with one covariate at two levels", { + response_cov <- + request(api_url) |> + req_url_path("study", "minimisation_pocock") |> + req_method("POST") |> + req_body_json( + data = list( + identifier = "ABC-X", + name = "Study ABC-X", + method = "var", + p = 0.85, + arms = list( + "placebo" = 1, + "active" = 1 + ), + covariates = list( + sex = list( + weight = 1, + levels = c("female", "male") + ) + ) + ) + ) |> + req_perform() + + response_cov_body <- + response_cov |> + resp_body_json() + + testthat::expect_equal(response_cov$status_code, 200) +}) + +test_that("request with incorrect study id", { + response <- request(api_url) |> + req_url_path("study", "minimisation_pocock") |> + req_method("POST") |> + req_body_json( + data = list( + identifier = "ABC-X", + name = "Study ABC-X", + method = "var", + p = 0.85, + arms = list( + "placebo" = 1, + "active" = 1 + ), + covariates = list( + sex = list( + weight = 1, + levels = c("female", "male") + ), + weight = list( + weight = 1, + levels = c("up to 60kg", "61-80 kg", "81 kg or more") + ) + ) + ) + ) |> + req_perform() - # Incorrect Study ID + response_body <- + response |> + resp_body_json() response_study <- tryCatch( @@ -88,8 +157,284 @@ test_that("endpoint 
returns the study id, can randomize 2 patients", { error = function(e) e ) - checkmate::expect_set_equal( - response_study$status, 400, + testthat::expect_equal(response_study$status, 400, label = "HTTP status code") +}) + +test_that("request with patient that is assigned an arm at entry", { + response <- request(api_url) |> + req_url_path("study", "minimisation_pocock") |> + req_method("POST") |> + req_body_json( + data = list( + identifier = "ABC-X", + name = "Study ABC-X", + method = "var", + p = 0.85, + arms = list( + "placebo" = 1, + "active" = 1 + ), + covariates = list( + sex = list( + weight = 1, + levels = c("female", "male") + ), + weight = list( + weight = 1, + levels = c("up to 60kg", "61-80 kg", "81 kg or more") + ) + ) + ) + ) |> + req_perform() + + response_body <- + response |> + resp_body_json() + + response_current_state <- + tryCatch( + { + request(api_url) |> + req_url_path("study", response_body$study$id, "patient") |> + req_method("POST") |> + req_body_json( + data = list( + current_state = + tibble::tibble( + "sex" = c("female", "male"), + "weight" = c("61-80 kg", "81 kg or more"), + "arm" = c("placebo", "control") + ) + ) + ) |> + req_perform() + }, + error = function(e) e + ) + + testthat::expect_equal( + response_current_state$status, 500, label = "HTTP status code" ) }) + +test_that("request with incorrect number of levels", { + response_cov <- + tryCatch( + { + request(api_url) |> + req_url_path("study", "minimisation_pocock") |> + req_method("POST") |> + req_body_json( + data = list( + identifier = "ABC-X", + name = "Study ABC-X", + method = "var", + p = 0.85, + arms = list( + "placebo" = 1, + "active" = 1 + ), + covariates = list( + sex = list( + weight = 1, + levels = c("female") + ), + weight = list( + weight = 1, + levels = c("up to 60kg", "61-80 kg", "81 kg or more") + ) + ) + ) + ) |> + req_perform() + }, + error = function(e) e + ) + + testthat::expect_equal(response_cov$status, 400) +}) + +test_that("request with incorrect parameter p", { + response_p <- + tryCatch( + { + request(api_url) |> + req_url_path("study", "minimisation_pocock") |> + req_method("POST") |> + req_body_json( + data = list( + identifier = "ABC-X", + name = "Study ABC-X", + method = "var", + p = "A", + arms = list( + "placebo" = 1, + "active" = 1 + ), + covariates = list( + sex = list( + weight = 1, + levels = c("female", "male") + ), + weight = list( + weight = 1, + levels = c("up to 60kg", "61-80 kg", "81 kg or more") + ) + ) + ) + ) |> + req_perform() + }, + error = function(e) e + ) + + testthat::expect_equal(response_p$status, 400) +}) + +test_that("request with incorrect arms", { + response_arms <- + tryCatch( + { + request(api_url) |> + req_url_path("study", "minimisation_pocock") |> + req_method("POST") |> + req_body_raw('{ + "identifier": "ABC-X", + "name": "Study ABC-X", + "method": "var", + "p": 0.85, + "arms": { + "placebo": 1, + "placebo": 1 + }, + "covariates": { + "sex": { + "weight": 1, + "levels": ["female", "male"] + }, + "weight": { + "weight": 1, + "levels": ["up to 60kg", "61-80 kg", "81 kg or more"] + } + } + }') |> + req_perform() + }, + error = function(e) e + ) + + testthat::expect_equal(response_arms$status, 400) +}) + +test_that("request with incorrect method", { + response_method <- + tryCatch( + { + request(api_url) |> + req_url_path("study", "minimisation_pocock") |> + req_method("POST") |> + req_body_json( + data = list( + identifier = "ABC-X", + name = "Study ABC-X", + method = 1, + p = 0.85, + arms = list( + "placebo" = 1, + "control" = 1 + ), + 
covariates = list( + sex = list( + weight = 1, + levels = c("female", "male") + ), + weight = list( + weight = 1, + levels = c("up to 60kg", "61-80 kg", "81 kg or more") + ) + ) + ) + ) |> + req_perform() + }, + error = function(e) e + ) + + testthat::expect_equal(response_method$status, 400) +}) + +test_that("request with incorrect weights", { + response_weights <- + tryCatch( + { + request(api_url) |> + req_url_path("study", "minimisation_pocock") |> + req_method("POST") |> + req_body_json( + data = list( + identifier = "ABC-X", + name = "Study ABC-X", + method = "var", + p = 0.85, + arms = list( + "placebo" = 1, + "control" = 1 + ), + covariates = list( + sex = list( + weight = "1", + levels = c("female", "male") + ), + weight = list( + weight = 1, + levels = c("up to 60kg", "61-80 kg", "81 kg or more") + ) + ) + ) + ) |> + req_perform() + }, + error = function(e) e + ) + + testthat::expect_equal(response_weights$status, 400) +}) + +test_that("request with incorrect ratio", { + response_ratio <- + tryCatch( + { + request(api_url) |> + req_url_path("study", "minimisation_pocock") |> + req_method("POST") |> + req_body_json( + data = list( + identifier = "ABC-X", + name = "Study ABC-X", + method = "var", + p = 0.85, + arms = list( + "placebo" = "1", + "control" = 1 + ), + covariates = list( + sex = list( + weight = 1, + levels = c("female", "male") + ), + weight = list( + weight = 1, + levels = c("up to 60kg", "61-80 kg", "81 kg or more") + ) + ) + ) + ) |> + req_perform() + }, + error = function(e) e + ) + + testthat::expect_equal(response_ratio$status, 400) +}) diff --git a/tests/testthat/test-run-api.R b/tests/testthat/test-run-api.R new file mode 100644 index 0000000..d5fd1bf --- /dev/null +++ b/tests/testthat/test-run-api.R @@ -0,0 +1,91 @@ +testthat::test_that("uses correct environment variables when setting up sentry", { + withr::local_envvar( + c( + SENTRY_DSN = "https://sentry.io/123", + GITHUB_SHA = "abc", + SENTRY_ENVIRONMENT = "production", + SENTRY_RELEASE = "1.0.0" + ) + ) + + testthat::local_mocked_bindings( + configure_sentry = function(dsn, + app_name, + app_version, + environment, + release) { + testthat::expect_equal(dsn, "https://sentry.io/123") + testthat::expect_equal(app_name, "unbiased") + testthat::expect_equal(app_version, "abc") + testthat::expect_equal(environment, "production") + testthat::expect_equal(release, "1.0.0") + }, + .package = "sentryR", + ) + + global_calling_handlers_called <- FALSE + + # mock globalCallingHandlers + testthat::local_mocked_bindings( + globalCallingHandlers = function(error) { + global_calling_handlers_called <<- TRUE + testthat::expect_equal( + unbiased:::global_calling_handler, + error + ) + }, + ) + + unbiased:::setup_sentry() + + testthat::expect_true(global_calling_handlers_called) +}) + +testthat::test_that("skips sentry setup if SENTRY_DSN is not set", { + withr::local_envvar( + c( + SENTRY_DSN = "" + ) + ) + + testthat::local_mocked_bindings( + configure_sentry = function(dsn, + app_name, + app_version, + environment, + release) { + # should not be called, so we fail the test + testthat::expect_true(FALSE) + }, + .package = "sentryR", + ) + + was_called <- FALSE + + # mock globalCallingHandlers + testthat::local_mocked_bindings( + globalCallingHandlers = function(error) { + was_called <<- TRUE + }, + ) + + testthat::expect_message(unbiased:::setup_sentry(), "SENTRY_DSN not set, skipping Sentry setup") + testthat::expect_false(was_called) +}) + +testthat::test_that("global_calling_handler captures exception and signals 
condition", { + error <- simpleError("test error") + + capture_exception_called <- FALSE + + testthat::local_mocked_bindings( + capture_exception = function(error) { + capture_exception_called <<- TRUE + testthat::expect_equal(error, error) + }, + .package = "sentryR", + ) + + testthat::expect_error(unbiased:::global_calling_handler(error)) + testthat::expect_true(capture_exception_called) +}) diff --git a/vignettes/articles/1000_sim_data.Rds b/vignettes/articles/1000_sim_data.Rds new file mode 100644 index 0000000..2dcc0a5 Binary files /dev/null and b/vignettes/articles/1000_sim_data.Rds differ diff --git a/vignettes/articles/helpers/functions.R b/vignettes/articles/helpers/functions.R new file mode 100644 index 0000000..2fde29a --- /dev/null +++ b/vignettes/articles/helpers/functions.R @@ -0,0 +1,116 @@ +# functions + +simulate_data_monte_carlo <- + function(def, n) { + data <- + genData(n, def) |> + mutate( + sex = as.character(sex), + age = as.character(age), + diabetes_type = as.character(diabetes_type), + hba1c = as.character(hba1c), + tpo2 = as.character(tpo2), + wound_size = as.character(wound_size) + ) |> + tibble::as_tibble() |> + tibble::add_column(arm = "") + + return(data) + } + +minimize_results <- + function(current_data, arms, weights) { + for (n in seq_len(nrow(current_data))) { + current_state <- current_data[1:n, 2:ncol(current_data)] + + current_data$arm[n] <- + randomize_minimisation_pocock( + arms = arms, + current_state = current_state, + weights = weights + ) + } + + return(current_data$arm) + } + +simple_results <- + function(current_data, arms, ratio) { + for (n in seq_len(nrow(current_data))) { + current_data$arm[n] <- + randomize_simple(arms, ratio) + } + + return(current_data$arm) + } + +# Function to generate a randomisation list +block_rand <- + function(n, block, n_groups, strata, arms = LETTERS[1:n_groups]) { + strata_grid <- expand.grid(strata) + + strata_n <- nrow(strata_grid) + + ratio <- rep(1, n_groups) + + gen_seq_list <- lapply(seq_len(strata_n), function(i) { + rand <- rpbrPar( + N = n, + rb = block, + K = n_groups, + ratio = ratio, + groups = arms, + filledBlock = FALSE + ) + getRandList(gen_seq_list(rand))[1, ] + }) + df_list <- tibble::tibble() + for (i in seq_len(strata_n)) { + local_df <- strata_grid |> + dplyr::slice(i) |> + dplyr::mutate(count = N) |> + tidyr::uncount(count) |> + tibble::add_column(rand_arm = genSeq_list[[i]]) + df_list <- rbind(local_df, df_list) + } + return(df_list) + } + +# Generate a research arm for patients in each iteration +block_results <- function(current_data) { + simulation_result <- + block_rand( + n = n, + block = c(3, 6, 9), + n_groups = 3, + strata = list( + sex = c("0", "1"), + diabetes_type = c("0", "1"), + hba1c = c("0", "1"), + tpo2 = c("0", "1"), + age = c("0", "1"), + wound_size = c("0", "1") + ), + arms = c("armA", "armB", "armC") + ) + + for (n in seq_len(nrow(current_data))) { + # "-1" is for "arm" column + current_state <- current_data[n, 2:(ncol(current_data) - 1)] + + matching_rows <- which(apply( + simulation_result[, -ncol(simulation_result)], 1, + function(row) all(row == current_state) + )) + + if (length(matching_rows) > 0) { + current_data$arm[n] <- + simulation_result[matching_rows[1], "rand_arm"] + + # Delete row from randomization list + simulation_result <- simulation_result[-matching_rows[1], , drop = FALSE] + } + } + + return(current_data$arm) +} diff --git a/vignettes/articles/helpers/run_parallel.R b/vignettes/articles/helpers/run_parallel.R new file mode 100644 index 
0000000..1d13604 --- /dev/null +++ b/vignettes/articles/helpers/run_parallel.R @@ -0,0 +1,77 @@ +source("helpers/functions.R") + +# set cluster +library(parallel) +# Start parallel cluster +cl <- makeForkCluster(no_of_cores) + +results <- + parLapply(cl, 1:no_of_iterations, function(i) { + # lapply(1:no_of_iterations, function(i) { + set.seed(i) + + data <- simulate_data_monte_carlo(def, n) + + # equal weights - 1/6 + minimize_equal_weights <- + minimize_results( + current_data = data, + arms = c("armA", "armB", "armC") + ) + + # double weights where the covariate is of high clinical significance + minimize_unequal_weights <- + minimize_results( + current_data = data, + arms = c("armA", "armB", "armC"), + weights = c( + "sex" = 1, + "diabetes_type" = 1, + "hba1c" = 2, + "tpo2" = 2, + "age" = 1, + "wound_size" = 2 + ) + ) + + # triple weights where the covariate is of high clinical significance + minimize_unequal_weights_3 <- + minimize_results( + current_data = data, + arms = c("armA", "armB", "armC"), + weights = c( + "sex" = 1, + "diabetes_type" = 1, + "hba1c" = 3, + "tpo2" = 3, + "age" = 1, + "wound_size" = 3 + ) + ) + + simple_data <- + simple_results( + current_data = data, + arms = c("armA", "armB", "armC"), + ratio = c("armB" = 1L, "armA" = 1L, "armC" = 1L) + ) + + block_data <- + block_results(current_data = data) + + data <- + data %>% + select(-arm) %>% + mutate( + minimize_equal_weights_arms = minimize_equal_weights, + minimize_unequal_weights_arms = minimize_unequal_weights, + minimize_unequal_weights_triple_arms = minimize_unequal_weights_3, + simple_data_arms = simple_data, + block_data_arms = block_data + ) %>% + tibble::add_column(simnr = i, .before = 1) + + return(data) + }) + +stopCluster(cl) diff --git a/vignettes/articles/minimization_randomization_comparison.Rmd b/vignettes/articles/minimization_randomization_comparison.Rmd new file mode 100644 index 0000000..bdac6eb --- /dev/null +++ b/vignettes/articles/minimization_randomization_comparison.Rmd @@ -0,0 +1,642 @@ +--- +title: "Comparison of Minimization Randomization with Other Randomization Methods. Assessing the balance of covariates." +author: + - Aleksandra Duda, Jagoda Głowacka-Walas^[Transition Technologies Science] +date: "`r Sys.Date()`" +output: + html_document: + toc: yes +bibliography: references.bib +link-citations: true +--- + +```{r, include = FALSE} +knitr::opts_chunk$set( + collapse = TRUE +) +``` + +## Introduction + +Randomization in clinical trials is the gold standard and is widely considered the best design for evaluating the effectiveness of new treatments compared to alternative treatments (standard of care) or placebo. Indeed, the selection of an appropriate randomisation is as important as the selection of an appropriate statistical analysis for the study and the analysis strategy, whether based on randomisation or on a population model (@berger2021roadmap). + +One of the primary advantages of randomization, particularly simple randomization (usually via a coin-flip method), is its ability to balance confounding variables across treatment groups. This is especially effective in large sample sizes (n > 200), where the random allocation of participants helps to ensure that both known and unknown confounders are evenly distributed between the study arms. This balanced distribution contributes significantly to the internal validity of the study, as it minimizes the risk of selection bias and confounding influencing the results (@lim2019randomization). 
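The effect of sample size on the balance achieved by pure chance can be illustrated in a few lines of base R. This is only a sketch for intuition, not part of the package or of the simulation code used later; the sample sizes and number of repetitions are arbitrary choices.

```r
# A minimal illustration: the relative arm-size imbalance of a coin-toss
# allocation shrinks as the sample size grows
set.seed(42)

relative_imbalance <- function(n) {
  arm <- sample(c("A", "B"), size = n, replace = TRUE)
  abs(sum(arm == "A") - sum(arm == "B")) / n
}

# average relative imbalance over 1000 repetitions for several sample sizes
sapply(c(30, 100, 1000), function(n) {
  mean(replicate(1000, relative_imbalance(n)))
})
```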
+ +It's important to note, however, that while simple randomization is powerful in large trials, it may not always guarantee an even distribution of confounding factors in trials with smaller sample sizes (n < 100). In such cases, the random allocation might result in imbalances in baseline characteristics between groups, which can affect the interpretation of the treatment's effectiveness. This potential limitation sets the stage for considering additional methods, such as stratified randomization or dynamic minimization algorithms, to address these challenges in smaller trials (@kang2008issues). + +This document provides a summary of the comparison of three randomization methods: simple randomization, block randomization, and adaptive randomization. Simple randomization and adaptive randomization (minimization method) are tools available in the `unbiased` package as the `randomize_simple` and `randomize_minimisation_pocock` functions (@unbiased). The comparison aims to demonstrate the advantage of adaptive randomization (minimization method) over the other methods in achieving the smallest imbalance of accompanying variables between therapeutic groups. Monte Carlo simulations were used to generate data, utilizing the `simstudy` package (@goldfeld2020simstudy). Parameters for the binary distribution of variables were based on data from the publication by @mrozikiewicz2023allogenic and information from researchers. + +The document structure is as follows: first, based on the defined parameters, data will be simulated using the Monte Carlo method for a single simulation; then, the generated patients will be assigned to groups using the three randomization methods; these data will be summarized in the form of descriptive statistics along with the relevant statistical test; next, data prepared in .Rds format and generated for 1000 simulations will be loaded, and the results based on the standardised mean difference (SMD) test will be discussed in visual form (boxplot, violin plot) and as the percentage of success achieved by each method for the given precision (tabular summary). + +```{r setup, warning = FALSE, message=FALSE} +# load packages +library(unbiased) +library(dplyr) +library(simstudy) +library(tableone) +library(ggplot2) +library(gt) +library(gtsummary) +library(truncnorm) +library(tidyr) +library(randomizeR) +``` + +## The randomization methods considered for comparison + +In the process of comparing the balance of covariates among randomization methods, three randomization methods have been selected for evaluation: + +- **simple randomization** - a simple coin toss; an algorithm that gives participants equal chances of being assigned to a particular arm. The method's advantage lies in its simplicity and the elimination of predictability. However, due to its complete randomness, it may lead to imbalances in sample sizes between arms and imbalances in prognostic factors. For a large sample size (n > 200), simple randomisation gives a similar number of generated participants in each group. For a small sample size (n < 100), it results in an imbalance (@kang2008issues). + +- **block randomization** - a randomization method that takes into account defined covariates for patients. The method involves assigning patients to therapeutic arms in blocks of a fixed size, with the recommendation that the blocks have different sizes. This, to some extent, reduces the risk of researchers predicting future arm assignments. 
In contrast to simple randomization, the block method aims to balance the number of patients within the block, hence reducing the overall imbalance between arms (@rosenberger2015randomization). + +- **adaptive randomization using the minimization method** based on the @pocock1975sequential algorithm - this randomization approach aims to balance prognostic factors across treatment arms within a clinical study. It functions by evaluating the total imbalance of these factors each time a new patient is considered for the study. The minimization method computes the overall imbalance for each potential arm assignment of the new patient, considering factors like variance or other specified criteria. The patient is then assigned to the arm where their addition results in the smallest total imbalance. This assignment is not deterministic but is made with a predetermined probability, ensuring some level of randomness in arm allocation. This method is particularly useful in trials with multiple prognostic factors or in smaller studies where traditional randomization might fail to achieve balance. + +## Assessment of covariate balance + +In the proposed approach to the assessment of randomization methods, the primary objective is to evaluate each method in terms of achieving balance in the specified covariates. The assessment of balance aims to determine whether the distributions of covariates are similarly balanced in each therapeutic group. Based on the literature, standardized mean differences (SMD) have been employed for assessing balance (@berger2021roadmap). + +The SMD method is one of the most commonly used statistics for assessing the balance of covariates, regardless of the unit of measurement. It is a statistical measure for comparing differences between two groups. The covariates in the examined case are expressed as binary variables. In the case of categorical variables, SMD is calculated using the following formula (@zhang2019balance): + +\[ SMD = \frac{{p_1 - p_2}}{{\sqrt{\frac{{p_1 \cdot (1 - p_1) + p_2 \cdot (1 - p_2)}}{2}}}}, \] + +where: + +- \( p_1 \) is the proportion in the first arm, + +- \( p_2 \) is the proportion in the second arm. + +## Defined number of patients and number of iterations + +In this simulation, we are using a real use case - the planned FootCell study - non-commercial clinical research in the area of civilisation diseases - to guide our data generation process. For the FootCell study, it is anticipated that a total of 105 patients will be randomized into the trial. These patients will be equally divided among three research groups - Group A, Group B, and Group C - with each group comprising 35 patients. + +```{r, define-parameters} +# defined number of patients +n <- 105 +``` + +## Defining parameters for Monte-Carlo simulation + +The distribution of parameters for individual covariates, which will subsequently be used to validate randomization methods, has been defined using the publication @mrozikiewicz2023allogenic on allogenic interventions. + +The publication compares the effectiveness of therapy using ADSC (Adipose-Derived Stem Cells) gel versus standard therapy with fibrin gel for patients in diabetic foot ulcer treatment. The FootCell study also aims to assess the safety of advanced therapy involving live ASCs (Adipose-Derived Stem Cells) in the treatment of diabetic foot syndrome, considering two groups treated with ADSCs (one or two administrations) compared to fibrin gel. 
Therefore, appropriate population data have been extracted from the publication to determine distributions that can be maintained when designing the FootCell study. + +In the process of defining the study for randomization, the following covariates have been selected: + +- **gender** [male/female], + +- **diabetes type** [type I/type II], + +- **HbA1c** [up to 9/9 to 11] [%], + +- **tpo2** [up to 50/above 50] [mmHg], + +- **age** [up to 55/above 55] [years], + +- **wound size** [up to 2/above 2] [cm\(^2\)]. + +In the publication @mrozikiewicz2023allogenic, the variables gender and diabetes type were expressed in the form of frequencies. The remaining variables were presented in terms of measures of central tendency along with an indication of variability, as well as minimum and maximum values. To determine the parameters for the binary distribution, the truncated normal distribution available in the `truncnorm` package was utilized. The truncated normal distribution is often used in statistics and probability modeling when dealing with data that is constrained to a certain range. It is particularly useful when you want to model a random variable that cannot take values beyond certain limits (@burkardt2014truncated). + +To generate the necessary information for the remaining covariates, a function `simulate_proportions_trunc` was written, utilizing the `rtruncnorm` function (@truncnorm). The parameters `mean`, `sd`, `lower`, `upper` were taken from the publication and from expert knowledge of the plausible ranges for these parameters. + +The results are presented in a table, assuming that the outcome refers to the first category of each parameter. + +```{r, simulate-proportions-function} +# simulate parameters using truncated normal distribution +simulate_proportions_trunc <- + function(n, lower, upper, mean, sd, threshold) { + simulate_data <- + rtruncnorm( + n = n, + a = lower, + b = upper, + mean = mean, + sd = sd + ) <= threshold + + sum(simulate_data == TRUE) / n + } +``` + +```{r, parameters-result-table, tab.cap = "Summary of literature verification of the selected strata parameters (Mrozikiewicz-Rakowska et al., 2023)"} +set.seed(123) + +data.frame( + hba1c = simulate_proportions_trunc(1000, 0, 11, 7.41, 1.33, 9), + tpo2 = simulate_proportions_trunc(1000, 30, 100, 53.4, 18.4, 50), + age = simulate_proportions_trunc(1000, 0, 100, 59.2, 9.7, 55), + wound_size = simulate_proportions_trunc(1000, 0, 20, 2.7, 2.28, 2) +) |> + rename("wound size" = wound_size) |> + pivot_longer( + cols = everything(), + names_to = "parameter", + values_to = "proportions" + ) |> + mutate("first category of strata" = c("<=9", "<=50", "<=55", "<=2")) |> + gt() +``` + +## Generate data using Monte-Carlo simulations + +Monte-Carlo simulations were used to generate the data. This method is designed to model variables based on defined parameters. Variables were defined using the `simstudy` package, utilizing the `defData` function (@goldfeld2020simstudy). As all variables specify proportions, `dist = 'binary'` was used to define the variables. Due to the likely association between the type of diabetes and age – meaning that the older the patient, the higher the probability of having type II diabetes – a relationship with diabetes was established when defining the `age` variable using a logit link (`link = "logit"`). The proportions for gender and diabetes were defined by the researchers and were consistent with the literature @mrozikiewicz2023allogenic. 
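For intuition, the logit link used for `age` can be translated into probabilities with `plogis()`. This is only a sketch of what the `defData` formula in the next chunk implies, not additional simulation code; the numbers follow directly from the linear predictor `(diabetes_type == 0) * (-0.95)`.

```r
# The linear predictor is -0.95 for type II diabetes (diabetes_type == 0)
# and 0 for type I (diabetes_type == 1), so the implied probabilities are:
plogis(-0.95) # ~0.28 probability that age == "1" (up to 55) for type II patients
plogis(0)     # 0.50 probability that age == "1" (up to 55) for type I patients
```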
+ +Using `genData` function from `simstudy` package, a data frame (**data**) was generated with an artificially adopted variable `arm`, which will be filled in by subsequent randomization methods in the arm allocation process for all `n` patients. + +```{r, defdata} +# defining variables + +# male - 0.9 +def <- simstudy::defData(varname = "sex", formula = "0.9", dist = "binary") +# type I - 0.15 +def <- simstudy::defData(def, varname = "diabetes_type", formula = "0.15", dist = "binary") +# <= 9 - 0.888 +def <- simstudy::defData(def, varname = "hba1c", formula = "0.888", dist = "binary") +# <= 50 - 0.354 +def <- simstudy::defData(def, varname = "tpo2", formula = "0.354", dist = "binary") +# correlation with diabetes type +def <- simstudy::defData( + def, + varname = "age", formula = "(diabetes_type == 0) * (-0.95)", link = "logit", dist = "binary" +) +# <= 2 - 0.302 +def <- simstudy::defData(def, varname = "wound_size", formula = "0.302", dist = "binary") +``` + +```{r, create-data} +# generate data using genData() +data <- + genData(n, def) |> + mutate( + sex = as.character(sex), + age = as.character(age), + diabetes_type = as.character(diabetes_type), + hba1c = as.character(hba1c), + tpo2 = as.character(tpo2), + wound_size = as.character(wound_size) + ) |> + as_tibble() +``` + +```{r, data-generate} +# add arm to tibble +data <- + data |> + tibble::add_column(arm = "") +``` + +```{r, data-show} +# first 5 rows of the data +head(data, 5) |> + gt() +``` + +## Minimization randomization + +To generate appropriate research arms, a function called `minimize_results` was written, utilizing the `randomize_minimisation_pocock` function available within the `unbiased` package (@unbiased). The probability parameter was set at the level defined within the function (p = 0.85). In the case of minimization randomization, to verify which type of minimization (with equal weights or unequal weights) was used, three calls to the minimize_results function were prepared: + +- **minimize_equal_weights** - each covariate weight takes a value equal to 1 divided by the number of covariates. In this case, the weight is 1/6, + +- **minimize_unequal_weights** - following the expert assessment by physicians, parameters with potentially significant impact on treatment outcomes (hba1c, tpo2, wound size) have been assigned a weight of 2. The remaining covariates have been assigned a weight of 1. + +- **minimize_unequal_weights_3** - following the expert assessment by physicians, parameters with potentially significant impact on treatment outcomes (hba1c, tpo2, wound size) have been assigned a weight of 3. The remaining covariates have been assigned a weight of 1. + +The tables present information about allocations for the first 5 patients. 
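Before wrapping the call in a loop, it may help to see a single allocation in isolation. The sketch below uses hypothetical covariate values and the same argument names as the `minimize_results` wrapper defined in the next chunk; the last row of `current_state` is the patient about to be randomized, with an empty `arm`.

```r
library(unbiased)
library(tibble)

# Hypothetical state: two patients already randomized, the third (last row)
# still has an empty arm and is about to be allocated
current_state <- tibble(
  sex = c("1", "0", "1"),
  diabetes_type = c("0", "0", "1"),
  hba1c = c("1", "1", "0"),
  tpo2 = c("0", "1", "1"),
  age = c("1", "0", "1"),
  wound_size = c("1", "0", "0"),
  arm = c("armA", "armB", "")
)

# returns the arm selected for the new patient
randomize_minimisation_pocock(
  arms = c("armA", "armB", "armC"),
  current_state = current_state,
  weights = c(
    "sex" = 1, "diabetes_type" = 1, "hba1c" = 2,
    "tpo2" = 2, "age" = 1, "wound_size" = 2
  )
)
```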
+ +```{r, minimize-results} +# drawing an arm for each patient +minimize_results <- + function(current_data, arms, weights) { + for (n in seq_len(nrow(current_data))) { + current_state <- current_data[1:n, 2:ncol(current_data)] + + current_data$arm[n] <- + randomize_minimisation_pocock( + arms = arms, + current_state = current_state, + weights = weights + ) + } + + return(current_data) + } +``` + +```{r, minimize-equal} +set.seed(123) +# equal weights - 1/6 +minimize_equal_weights <- + minimize_results( + current_data = data, + arms = c("armA", "armB", "armC") + ) + +head(minimize_equal_weights, 5) |> + gt() +``` + +```{r, minimize-unequal-1} +set.seed(123) +# double weights where the covariate is of high clinical significance +minimize_unequal_weights <- + minimize_results( + current_data = data, + arms = c("armA", "armB", "armC"), + weights = c( + "sex" = 1, + "diabetes_type" = 1, + "hba1c" = 2, + "tpo2" = 2, + "age" = 1, + "wound_size" = 2 + ) + ) + +head(minimize_unequal_weights, 5) |> + gt() +``` + +```{r, minimize-unequal-2} +set.seed(123) +# triple weights where the covariate is of high clinical significance +minimize_unequal_weights_3 <- + minimize_results( + current_data = data, + arms = c("armA", "armB", "armC"), + weights = c( + "sex" = 1, + "diabetes_type" = 1, + "hba1c" = 3, + "tpo2" = 3, + "age" = 1, + "wound_size" = 3 + ) + ) + +head(minimize_unequal_weights_3, 5) |> + gt() +``` + +The `statistics_table` function was developed to provide information on the distribution of the number of patients and of the covariates across research arms, along with p-values for the statistical tests used to compare proportions - the chi^2 test and Fisher's exact test, the latter typically used for small samples. + +The function relies on the use of the `tbl_summary` function available in the `gtsummary` package (@gtsummary). + +```{r, statistics-table} +# generate frequencies and chi^2 or Fisher's exact test results +statistics_table <- + function(data) { + data |> + mutate( + sex = ifelse(sex == "1", "men", "women"), + diabetes_type = ifelse(diabetes_type == "1", "type1", "type2"), + hba1c = ifelse(hba1c == "1", "<=9", "(9,11>"), + tpo2 = ifelse(tpo2 == "1", "<=50", ">50"), + age = ifelse(age == "1", "<=55", ">55"), + wound_size = ifelse(wound_size == "1", "<=2", ">2") + ) |> + tbl_summary( + include = c(sex, diabetes_type, hba1c, tpo2, age, wound_size), + by = arm + ) |> + modify_header(label = "") |> + modify_header(all_stat_cols() ~ "**{level}**, N = {n}") |> + bold_labels() |> + add_p() + } +``` + +The table presents a statistical summary of results for the first iteration for: + +- **Minimization with all weights equal to 1/6**. + +```{r, chi2-1, tab.cap = "Summary of proportion test for minimization randomization with equal weights"} +statistics_table(minimize_equal_weights) +``` + +- **Minimization with weights 2:1**. + +```{r, chi2-2, tab.cap = "Summary of proportion test for minimization randomization with unequal weights (2:1)"} +statistics_table(minimize_unequal_weights) +``` + +- **Minimization with weights 3:1**. + +```{r, chi2-3, tab.cap = "Summary of proportion test for minimization randomization with unequal weights (3:1)"} +statistics_table(minimize_unequal_weights_3) +``` + +## Simple randomization + +In the next step, appropriate arms were generated for patients using simple randomization, available through the `unbiased` package - the `randomize_simple` function (@unbiased). 
The `simple_results` function was used to create `simple_data`, under the initial assumption of assigning patients to the three arms in a 1:1:1 ratio. + +Since this is simple randomization, it does not take into account the initial covariates, and treatment assignment occurs randomly (coin-flip method). The tables illustrate an example of data output and summary statistics, including a summary of the statistical tests. + +```{r, simple-result} +# simple randomization +simple_results <- + function(current_data, arms, ratio) { + for (n in seq_len(nrow(current_data))) { + current_data$arm[n] <- + randomize_simple(arms, ratio) + } + + return(current_data) + } +``` + +```{r, simple-data} +set.seed(123) + +simple_data <- + simple_results( + current_data = data, + arms = c("armA", "armB", "armC"), + ratio = c("armB" = 1L, "armA" = 1L, "armC" = 1L) + ) + +head(simple_data, 5) |> + gt() +``` + +```{r, chi2-4, tab.cap = "Summary of proportion test for simple randomization"} +statistics_table(simple_data) +``` + +## Block randomization + +Block randomization, as opposed to the minimization and simple randomization methods, was developed based on the `rpbrPar` function available in the `randomizeR` package (@randomizeR). Using this, the `block_rand` function was created, which, based on the defined number of patients, arms, and a list of stratifying factors, generates a randomization list with a length equal to the number of patients multiplied by the product of the numbers of categories in each covariate. In the case of the data specified in this document, for one iteration, it amounts to **105 * 2^6 = 6720 rows**. This ensures that an adequate number of randomisation codes is available for every stratum; even if all patients shared the same characteristics, there would still be enough codes for the defined `n` patients. + +The `block_rand` function generates the randomisation list from which patients with the characteristics recorded in the output `data` frame are allocated. Due to the 3 arms and the need to blind the allocation of consecutive patients, block sizes of 3, 6 and 9 were used for the calculations. + +In the next step, patients were assigned to research groups using the `block_results` function (based on the list generated by the `block_rand` function). The first available code in the randomization list that matches the patient's covariates is selected and then removed from the list of available codes. Based on this, research arms are generated to ensure the appropriate number of patients in each group (based on the assumed ratio of 1:1:1). + +The tables show the assignment of patients to groups using block randomisation and summary statistics, including a summary of the statistical tests. 
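The core `randomizeR` calls used inside `block_rand` can be previewed for a single stratum. The sketch below uses the same functions and arguments as the chunk that follows; it draws one permuted-block sequence of 105 allocations for three arms in a 1:1:1 ratio with block sizes 3, 6 and 9.

```r
library(randomizeR)

# One stratum only: a randomized permuted-block design with blocks of 3, 6, 9
par <- rpbrPar(
  N = 105,
  rb = c(3, 6, 9),
  K = 3,
  ratio = c(1, 1, 1),
  groups = c("armA", "armB", "armC"),
  filledBlock = FALSE
)

# generate a single sequence and extract it as a character vector
getRandList(genSeq(par))[1, ]
```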
+ +```{r, block-rand} +# Function to generate a randomisation list +block_rand <- + function(n, block, n_groups, strata, arms = LETTERS[1:n_groups]) { + strata_grid <- expand.grid(strata) + + strata_n <- nrow(strata_grid) + + ratio <- rep(1, n_groups) + + gen_seq_list <- lapply(seq_len(strata_n), function(i) { + rand <- rpbrPar( + N = n, + rb = block, + K = n_groups, + ratio = ratio, + groups = arms, + filledBlock = FALSE + ) + getRandList(genSeq(rand))[1, ] + }) + df_list <- tibble::tibble() + for (i in seq_len(strata_n)) { + local_df <- strata_grid |> + dplyr::slice(i) |> + # replicate each stratum n times (one code per potential patient) + dplyr::mutate(count = n) |> + tidyr::uncount(count) |> + tibble::add_column(rand_arm = gen_seq_list[[i]]) + df_list <- rbind(local_df, df_list) + } + return(df_list) + } +``` + +```{r, block-results} +# Generate a research arm for patients in each iteration +block_results <- function(current_data) { + simulation_result <- + block_rand( + n = n, + block = c(3, 6, 9), + n_groups = 3, + strata = list( + sex = c("0", "1"), + diabetes_type = c("0", "1"), + hba1c = c("0", "1"), + tpo2 = c("0", "1"), + age = c("0", "1"), + wound_size = c("0", "1") + ), + arms = c("armA", "armB", "armC") + ) + + for (n in seq_len(nrow(current_data))) { + # "-1" is for "arm" column + current_state <- current_data[n, 2:(ncol(current_data) - 1)] + + matching_rows <- which(apply( + simulation_result[, -ncol(simulation_result)], 1, + function(row) all(row == current_state) + )) + + if (length(matching_rows) > 0) { + current_data$arm[n] <- + simulation_result[matching_rows[1], "rand_arm"] + + # Delete row from randomization list + simulation_result <- simulation_result[-matching_rows[1], , drop = FALSE] + } + } + + return(current_data) +} +``` + +```{r, block-data-show} +set.seed(123) + +block_data <- + block_results(data) + +head(block_data, 5) |> + gt() +``` + +```{r, chi2-5, tab.cap = "Summary of proportion test for block randomization"} +statistics_table(block_data) +``` + +## Generate 1000 simulations + +We performed 1000 iterations of data generation with the parameters defined above. The number of iterations is the number of Monte-Carlo runs used to accumulate data for the given parameters. This allowed data to be generated 1000 times for 105 patients, so that the effect of the randomization methods on covariate balance could be assessed more reliably. + +These data were loaded into the variable `sim_data` from the .Rds file `1000_sim_data.Rds`, available with the vignette materials in the GitHub repository of the `unbiased` package. + +```{r, simulations} +# define number of iterations +# no_of_iterations <- 1000 # nolint +# define number of cores +# no_of_cores <- 20 # nolint +# perform simulations (run carefully!) +# source("~/unbiased/vignettes/helpers/run_parallel.R") # nolint + +# read data from file +sim_data <- readRDS("1000_sim_data.Rds") +``` + +## Check balance using the SMD test + +To select the test and to define the precision level below which we assume the covariates are balanced, a literature analysis was conducted based on publications such as @lee2021estimating, @austin2009balance, @doah2021impact, @brown2020novel, @nguyen2017double, @sanchez2003effect, @lee2022propensity, @berger2021roadmap. + +To assess the balance of covariates between the research groups A, B, C, the Standardized Mean Difference (SMD) test was employed, which compares two groups. 
Since there are three groups in the example, the SMD test is computed for each pair of comparisons: A vs B, A vs C, and B vs C. The average SMD test for a given covariate is then calculated based on these comparisons. + +In the literature analysis, the precision level ranged between 0.1-0.2. For small samples, it was expected that the SMD test would exceed 0.2 (@austin2009balance). Additionally, according to the publication by @sanchez2003effect, there is no golden standard that dictates a specific threshold for the SMD test to be considered balanced. Generally, the smaller the SMD test, the smaller the difference in covariate imbalance. + +In the analyzed example, due to the sample size of 105 patients, a threshold of 0.2 for the SMD test was adopted. + +A function called `smd_covariants_data` was written to generate frames that produce the SMD test for each covariate in each iteration, utilizing the `CreateTableOne` function available in the `tableone` package (@tableone). In cases where the test result is <0.001, a value of 0 was assigned. + +The results for each randomization method were stored in the `cov_balance_data`. + +```{r, define-strata-vars} +# definied covariants +vars <- c("sex", "age", "diabetes_type", "wound_size", "tpo2", "hba1c") +``` + +```{r, smd-covariants-data} +smd_covariants_data <- + function(data, vars, strata) { + result_table <- + lapply(unique(data$simnr), function(i) { + current_data <- data[data$simnr == i, ] + arms_to_check <- setdiff(names(current_data), c(vars, "id", "simnr")) + # check SMD for any covariants + lapply(arms_to_check, function(arm) { + tab <- + CreateTableOne( + vars = vars, + data = current_data, + strata = arm + ) + + results_smd <- + ExtractSmd(tab) |> + as.data.frame() |> + tibble::rownames_to_column("covariants") |> + select(covariants, results = average) |> + mutate(results = round(as.numeric(results), 3)) + + results <- + bind_cols( + simnr = i, + strata = arm, + results_smd + ) + return(results) + }) |> + bind_rows() + }) |> + bind_rows() + + return(result_table) + } +``` + +```{r, cov-balance-data, echo = TRUE, results='hide'} +cov_balance_data <- + smd_covariants_data( + data = sim_data, + vars = vars + ) |> + mutate(method = case_when( + strata == "minimize_equal_weights_arms" ~ "minimize equal", + strata == "minimize_unequal_weights_arms" ~ "minimize unequal 2:1", + strata == "minimize_unequal_weights_triple_arms" ~ "minimize unequal 3:1", + strata == "simple_data_arms" ~ "simple randomization", + strata == "block_data_arms" ~ "block randomization" + )) |> + select(-strata) +``` + +Below are the results of the SMD test presented in the form of boxplot and violin plot, depicting the outcomes for each randomization method. The red dashed line indicates the adopted precision threshold. 
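As a reference for reading the plots, the pairwise SMD described in the "Assessment of covariate balance" section can be reproduced by hand. The proportions below are purely hypothetical and serve only to show how the per-covariate average over the three pairwise comparisons is formed; as in `tableone`, the magnitude (absolute value) of the SMD is used.

```r
# Pairwise SMD for a binary covariate, averaged over the three arm comparisons
smd_binary <- function(p1, p2) {
  abs(p1 - p2) / sqrt((p1 * (1 - p1) + p2 * (1 - p2)) / 2)
}

# hypothetical proportions of one covariate level in each arm
p <- c(armA = 0.30, armB = 0.34, armC = 0.28)

mean(c(
  smd_binary(p[["armA"]], p[["armB"]]),
  smd_binary(p[["armA"]], p[["armC"]]),
  smd_binary(p[["armB"]], p[["armC"]])
))
```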
+ +- **Boxplot of the combined results** + +```{r, boxplot, fig.cap= "Summary of average SMD for each randomization method", warning=FALSE, fig.width=9, fig.height=6} +# boxplot +cov_balance_data |> + select(simnr, results, method) |> + group_by(simnr, method) |> + mutate(results = mean(results)) |> + distinct() |> + ggplot(aes(x = method, y = results, fill = method)) + + geom_boxplot() + + geom_hline(yintercept = 0.2, linetype = "dashed", color = "red") + + theme_bw() +``` + +- **Violin plot** + +```{r, violinplot, fig.cap= "Summary of SMD for each randomization method and each covariate", warning = FALSE, fig.width=9, fig.height=6} +# violin plot +cov_balance_data |> + ggplot(aes(x = method, y = results, fill = method)) + + geom_violin() + + geom_hline( + yintercept = 0.2, + linetype = "dashed", + color = "red" + ) + + facet_wrap(~covariants, ncol = 3) + + theme_bw() + + theme(axis.text = element_text(angle = 45, vjust = 0.5, hjust = 1)) +``` + +- **Summary table of success** + +Based on the specified precision threshold of 0.2, a function defining randomization success, named `success_power`, was developed. If the SMD value for any covariate in a given iteration is above 0.2, the function classifies that iteration as a 'failure' (0); otherwise, it is classified as a 'success' (1). + +The final success power is calculated as the sum of successes across iterations divided by the total number of specified iterations. + +The results are summarized in a table as the percentage of success for each randomization method. + +```{r, success-power} +# function defining success of randomisation +success_power <- + function(cov_data) { + result_table <- + lapply(unique(cov_data$simnr), function(i) { + current_data <- cov_data[cov_data$simnr == i, ] + + current_data |> + group_by(method) |> + summarise(success = ifelse(any(results > 0.2), 0, 1)) |> + tibble::add_column(simnr = i, .before = 1) + }) |> + bind_rows() + + success <- + result_table |> + group_by(method) |> + summarise(results_power = sum(success) / n() * 100) + + + return(success) + } +``` + +```{r, success-result-data, tab.cap = "Summary of percent success for each randomization method"} +success_power(cov_balance_data) |> + as.data.frame() |> + rename(`power results [%]` = results_power) |> + gt() +``` + +## Conclusion + +Considering all three randomization methods: minimization, block randomization, and simple randomization, minimization performs the best in terms of covariate balance. Simple randomization has a significant drawback, as patient allocation to arms occurs randomly with equal probability. This can lead to imbalance both in the number of patients and in the covariates, and this imbalance is itself random. This is particularly the case with small samples; balancing the number of patients can only be expected for larger samples (n > 200). + +On the other hand, block randomization performs very well in balancing the number of patients across groups in a specified allocation ratio. However, compared to adaptive randomisation using the minimisation method, block randomisation is less effective at balancing the covariates. + +The minimization method provides the highest success power by ensuring balance of covariates across groups. This is made possible by the algorithm implemented as part of minimisation randomisation. 
When assigning the next patient to a group, the method examines the total imbalance and then assigns the patient to the appropriate study group with a specified probability to balance the sample in terms of size, and covariates. + +# References + +--- +nocite: '@*' +... diff --git a/vignettes/articles/references.bib b/vignettes/articles/references.bib new file mode 100644 index 0000000..cdbe561 --- /dev/null +++ b/vignettes/articles/references.bib @@ -0,0 +1,222 @@ +% Encoding: UTF-8 + +@article{lim2019randomization, + title={Randomization in clinical studies}, + author={Lim, Chi-Yeon and In, Junyong}, + journal={Korean journal of anesthesiology}, + volume={72}, + number={3}, + pages={221--232}, + year={2019}, + publisher={Korean Society of Anesthesiologists} +} + + @article{goldfeld2020simstudy, + title = {simstudy: Illuminating research methods through data generation}, + author = {Keith Goldfeld and Jacob Wujciak-Jens}, + publisher = {The Open Journal}, + journal = {Journal of Open Source Software}, + year = {2020}, + volume = {5}, + number = {54}, + pages = {2763}, + url = {https://doi.org/10.21105/joss.02763}, + doi = {10.21105/joss.02763}, + } + + @article{mrozikiewicz2023allogenic, + title={Allogenic Adipose-Derived Stem Cells in Diabetic Foot Ulcer Treatment: Clinical Effectiveness, Safety, Survival in the Wound Site, and Proteomic Impact}, + author={Mrozikiewicz-Rakowska, Beata and Szab{\l}owska-Gadomska, Ilona and Cysewski, Dominik and Rudzi{\'n}ski, Stefan and P{\l}oski, Rafa{\l} and Gasperowicz, Piotr and Konarzewska, Magdalena and Zieli{\'n}ski, Jakub and Mieczkowski, Mateusz and Sie{\'n}ko, Damian and others}, + journal={International Journal of Molecular Sciences}, + volume={24}, + number={2}, + pages={1472}, + year={2023}, + publisher={MDPI} +} + +@article{pocock1975sequential, + title={Sequential treatment assignment with balancing for prognostic factors in the controlled clinical trial}, + author={Pocock, Stuart J and Simon, Richard}, + journal={Biometrics}, + pages={103--115}, + year={1975}, + publisher={JSTOR} +} + +@book{rosenberger2015randomization, + title={Randomization in clinical trials: theory and practice}, + author={Rosenberger, William F and Lachin, John M}, + year={2015}, + publisher={John Wiley \& Sons} +} + +@article{lee2021estimating, + title={Estimating COVID-19 infection and severity risks in patients with chronic rhinosinusitis: a Korean nationwide cohort study}, + author={Lee, Seung Won and Kim, So Young and Moon, Sung Yong and Yang, Jee Myung and Ha, Eun Kyo and Jee, Hye Mi and Shin, Jae Il and Cho, Seong Ho and Yon, Dong Keon and Suh, Dong In}, + journal={The Journal of Allergy and Clinical Immunology: In Practice}, + volume={9}, + number={6}, + pages={2262--2271}, + year={2021}, + publisher={Elsevier} +} + +@article{austin2009balance, + title={Balance diagnostics for comparing the distribution of baseline covariates between treatment groups in propensity-score matched samples}, + author={Austin, Peter C}, + journal={Statistics in medicine}, + volume={28}, + number={25}, + pages={3083--3107}, + year={2009}, + publisher={Wiley Online Library} +} + +@article{doah2021impact, + title={The impact of primary tumor resection on survival in asymptomatic colorectal cancer patients with unresectable metastases}, + author={Doah, Ki Yoon and Shin, Ui Sup and Jeon, Byong Ho and Cho, Sang Sik and Moon, Sun Mi}, + journal={Annals of Coloproctology}, + volume={37}, + number={2}, + pages={94}, + year={2021}, + publisher={Korean Society of Coloproctology} +} + 
+@article{brown2020novel, + title={A novel approach for propensity score matching and stratification for multiple treatments: Application to an electronic health record--derived study}, + author={Brown, Derek W and DeSantis, Stacia M and Greene, Thomas J and Maroufy, Vahed and Yaseen, Ashraf and Wu, Hulin and Williams, George and Swartz, Michael D}, + journal={Statistics in medicine}, + volume={39}, + number={17}, + pages={2308--2323}, + year={2020}, + publisher={Wiley Online Library} +} + +@article{nguyen2017double, + title={Double-adjustment in propensity score matching analysis: choosing a threshold for considering residual imbalance}, + author={Nguyen, Tri-Long and Collins, Gary S and Spence, Jessica and Daur{\`e}s, Jean-Pierre and Devereaux, PJ and Landais, Paul and Le Manach, Yannick}, + journal={BMC medical research methodology}, + volume={17}, + pages={1--8}, + year={2017}, + publisher={Springer} +} + +@article{sanchez2003effect, + title={Effect-size indices for dichotomized outcomes in meta-analysis.}, + author={S{\'a}nchez-Meca, Julio and Mar{\'\i}n-Mart{\'\i}nez, Fulgencio and Chac{\'o}n-Moscoso, Salvador}, + journal={Psychological methods}, + volume={8}, + number={4}, + pages={448}, + year={2003}, + publisher={American Psychological Association} +} + +@article{lee2022propensity, + title={Propensity score matching for causal inference and reducing the confounding effects: statistical standard and guideline of Life Cycle Committee}, + author={Lee, Seung Won and Acharya, Krishna Prasad and others}, + journal={Life Cycle}, + volume={2}, + year={2022}, + publisher={Life Cycle} +} + +@article{zhang2019balance, + title={Balance diagnostics after propensity score matching}, + author={Zhang, Zhongheng and Kim, Hwa Jung and Lonjon, Guillaume and Zhu, Yibing and others}, + journal={Annals of translational medicine}, + volume={7}, + number={1}, + year={2019}, + publisher={AME Publications} +} + + @Manual{truncnorm, + title = {truncnorm: Truncated Normal Distribution}, + author = {Olaf Mersmann and Heike Trautmann and Detlef Steuer and Björn Bornkamp}, + year = {2023}, + note = {R package version 1.0-9}, + url = {https://github.com/olafmersmann/truncnorm}, + } + +@article{burkardt2014truncated, + title={The truncated normal distribution}, + author={Burkardt, John}, + journal={Department of Scientific Computing Website, Florida State University}, + volume={1}, + pages={35}, + year={2014} +} + + @Manual{tableone, + title = {tableone: Create 'Table 1' to Describe Baseline Characteristics with or +without Propensity Score Weights}, + author = {Kazuki Yoshida and Alexander Bartel}, + year = {2022}, + note = {R package version 0.13.2}, + url = {https://github.com/kaz-yos/tableone}, + } + @article{randomizeR, + title = {{randomizeR}: An {R} Package for the Assessment and Implementation of Randomization in Clinical Trials}, + author = {Diane Uschner and David Schindler and Ralf-Dieter Hilgers and Nicole Heussen}, + journal = {Journal of Statistical Software}, + year = {2018}, + volume = {85}, + number = {8}, + pages = {1--22}, + doi = {10.18637/jss.v085.i08}, + } + + + @article{gtsummary, + author = {Daniel D. Sjoberg and Karissa Whiting and Michael Curry and Jessica A. 
Lavery and Joseph Larmarange}, + title = {Reproducible Summary Tables with the gtsummary Package}, + journal = {{The R Journal}}, + year = {2021}, + url = {https://doi.org/10.32614/RJ-2021-053}, + doi = {10.32614/RJ-2021-053}, + volume = {13}, + issue = {1}, + pages = {570-580}, + } + +@article{berger2021roadmap, + title={A roadmap to using randomization in clinical trials}, + author={Berger, Vance W and Bour, Louis Joseph and Carter, Kerstine and Chipman, Jonathan J and Everett, Colin C and Heussen, Nicole and Hewitt, Catherine and Hilgers, Ralf-Dieter and Luo, Yuqun Abigail and Renteria, Jone and others}, + journal={BMC Medical Research Methodology}, + volume={21}, + pages={1--24}, + year={2021}, + publisher={Springer} +} + +@article{kang2008issues, + title={Issues in outcomes research: an overview of randomization techniques for clinical trials}, + author={Kang, Minsoo and Ragan, Brian G and Park, Jae-Hyeon}, + journal={Journal of athletic training}, + volume={43}, + number={2}, + pages={215--221}, + year={2008}, + publisher={The National Athletic Trainers' Association, Inc c/o Hughston Sports~…} +} + + @Manual{unbiased, + title = {unbiased: Diverse Randomization Algorithms for Clinical Trials}, + author = {Kamil Sijko and Kinga Sałata and Aleksandra Duda and Łukasz Wałejko}, + year = {2024}, + note = {R package version 0.0.0.9003}, + url = {https://ttscience.github.io/unbiased/}, + }