diff --git a/.Rbuildignore b/.Rbuildignore index 4862ac6..6dc0156 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -1,3 +1,10 @@ ^renv$ ^renv\.lock$ ^\.github$ +^unbiased\.Rproj$ +^\.Rproj\.user$ +^LICENSE\.md$ +^_pkgdown\.yml$ +^docs$ +^pkgdown$ +^vignettes/articles$ diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 0000000..392966e --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,21 @@ +FROM ghcr.io/rocker-org/devcontainer/r-ver:4.2 + +RUN apt update && apt-get install -y --no-install-recommends \ + # httpuv + libz-dev \ + # sodium + libsodium-dev \ + # RPostgres + libpq-dev libssl-dev postgresql-client \ + # R_X11 + libxt-dev + +RUN pip install watchdog[watchmedo] + +ENV RENV_CONFIG_SANDBOX_ENABLED=FALSE + +# Install database migration tool +RUN curl -L https://packagecloud.io/golang-migrate/migrate/gpgkey | apt-key add - && \ + echo "deb https://packagecloud.io/golang-migrate/migrate/ubuntu/ focal main" > /etc/apt/sources.list.d/migrate.list && \ + apt-get update && \ + apt-get install -y migrate diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000..2bf7c52 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,42 @@ +{ + "name": "R unbiased", + "dockerComposeFile": "docker-compose.yml", + "service": "app", + "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}", + "features": { + "ghcr.io/rocker-org/devcontainer-features/renv-cache:0": {}, + "ghcr.io/rocker-org/devcontainer-features/rstudio-server:0": { + "singleUser": true, + "version": "stable" + } + }, + "postCreateCommand": "R -q -e 'renv::restore()'", + // "postAttachCommand": { + // "rstudio-start": "rserver" + // }, + "forwardPorts": [ + 8787, + 5454 + ], + "portsAttributes": { + "8787": { + "label": "RStudio IDE" + }, + "5454": { + "label": "PGAdmin" + } + }, + "customizations": { + "vscode": { + "extensions": [ + "RDebugger.r-debugger" + ], + "settings": { + "terminal.integrated.shell.linux": "/bin/bash", + "r.rterm.linux": "/usr/local/bin/radian", + "r.bracketedPaste": true, + "r.plot.useHttpgd": true + } + } + } +} \ No newline at end of file diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml new file mode 100644 index 0000000..a341a70 --- /dev/null +++ b/.devcontainer/docker-compose.yml @@ -0,0 +1,57 @@ +version: '3.8' + +services: + app: + build: + context: .. + dockerfile: .devcontainer/Dockerfile + + volumes: + - ../..:/workspaces:cached + + # Overrides default command so things don't shut down after the process ends. + command: sleep infinity + + # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function. + network_mode: service:db + + # Use "forwardPorts" in **devcontainer.json** to forward an app port locally. + # (Adding the "ports" property to this file will not forward from a Codespace.) 
+ + environment: + POSTGRES_USER: postgres + POSTGRES_DB: postgres + POSTGRES_PASSWORD: postgres + POSTGRES_HOST: db + + pgadmin: + image: dpage/pgadmin4 + environment: + POSTGRES_USER: postgres + POSTGRES_DB: postgres + POSTGRES_PASSWORD: postgres + POSTGRES_HOST: db + PGADMIN_DEFAULT_EMAIL: pgadmin@example.com + PGADMIN_DEFAULT_PASSWORD: pgadmin + volumes: + - pga-data:/tmp/dev/pga/data + depends_on: + - db + ports: + - "5454:80" + + db: + image: ghcr.io/ttscience/postgres-temporal-tables/postgres-temporal-tables:latest + restart: unless-stopped + volumes: + - postgres-data:/var/lib/postgresql/data + environment: + POSTGRES_USER: postgres + POSTGRES_DB: postgres + POSTGRES_PASSWORD: postgres + # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally. + # (Adding the "ports" property to this file will not forward from a Codespace.) + +volumes: + postgres-data: + pga-data: diff --git a/.github/workflows/R-CMD-check.yaml b/.github/workflows/R-CMD-check.yaml deleted file mode 100644 index ac76e84..0000000 --- a/.github/workflows/R-CMD-check.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# Workflow derived from https://github.com/r-lib/actions/tree/v2/examples -# Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help -on: - push: - branches: [main, devel] - pull_request: - branches: [main, devel] - -name: R-CMD-check - -jobs: - R-CMD-check: - runs-on: ubuntu-latest - - name: Ubuntu (latest) - - strategy: - fail-fast: false - - env: - GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} - R_KEEP_PKG_SOURCE: yes - - steps: - - uses: actions/checkout@v3 - - - uses: r-lib/actions/setup-pandoc@v2 - - - name: Build image - run: docker build -t unbiased --build-arg github_sha=${{ github.sha }} . - - - name: Run tests - run: docker compose -f "docker-compose.test.yaml" up --abort-on-container-exit --exit-code-from tests --attach tests diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml index 233a154..a2c680b 100644 --- a/.github/workflows/docker-publish.yml +++ b/.github/workflows/docker-publish.yml @@ -1,4 +1,4 @@ -name: Docker +name: Build and Publish Docker Images # This workflow uses actions that are not certified by GitHub. # They are provided by a third-party and are governed by @@ -6,14 +6,12 @@ name: Docker # documentation. on: - schedule: - - cron: '15 0 * * *' push: - branches: [ "main" ] + branches: [ "main", "devel" ] # Publish semver tags as releases. tags: [ 'v*.*.*' ] pull_request: - branches: [ "main" ] + branches: [main, devel] workflow_dispatch: env: @@ -21,6 +19,7 @@ env: REGISTRY: ghcr.io # github.repository as / IMAGE_NAME: ${{ github.repository }} + BRANCH_TAG: ${{ github.ref == 'refs/heads/devel' && 'unbiased-dev' || 'latest' }} jobs: diff --git a/.github/workflows/document.yaml b/.github/workflows/document.yaml new file mode 100644 index 0000000..8ac7bab --- /dev/null +++ b/.github/workflows/document.yaml @@ -0,0 +1,42 @@ +# Workflow derived from https://github.com/r-lib/actions/tree/v2/examples +# Need help debugging build failures? 
Start at https://github.com/r-lib/actions#where-to-find-help +on: + push: + paths: ["R/**"] + +name: Document + +jobs: + document: + runs-on: ubuntu-latest + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + steps: + - name: Checkout repo + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Setup R + uses: r-lib/actions/setup-r@v2 + with: + use-public-rspm: true + + - name: Install dependencies + uses: r-lib/actions/setup-r-dependencies@v2 + with: + extra-packages: any::roxygen2 + needs: roxygen2 + + - name: Document + run: roxygen2::roxygenise() + shell: Rscript {0} + + - name: Commit and push changes + run: | + git config --local user.name "$GITHUB_ACTOR" + git config --local user.email "$GITHUB_ACTOR@users.noreply.github.com" + git add man/\* NAMESPACE DESCRIPTION + git commit -m "Update documentation" || echo "No changes to commit" + git pull --ff-only + git push origin diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml new file mode 100644 index 0000000..5d4cb21 --- /dev/null +++ b/.github/workflows/lint.yaml @@ -0,0 +1,32 @@ +# Workflow derived from https://github.com/r-lib/actions/tree/v2/examples +# Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help +on: + push: + branches: [main, devel] + pull_request: + branches: [main, devel] + +name: lint + +jobs: + lint: + runs-on: ubuntu-latest + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + steps: + - uses: actions/checkout@v4 + + - uses: r-lib/actions/setup-r@v2 + with: + use-public-rspm: true + + - uses: r-lib/actions/setup-r-dependencies@v2 + with: + extra-packages: any::lintr, local::. + needs: lint + + - name: Lint + run: lintr::lint_package() + shell: Rscript {0} + env: + LINTR_ERROR_ON_LINT: true diff --git a/.github/workflows/pkgdown.yaml b/.github/workflows/pkgdown.yaml new file mode 100644 index 0000000..e05694f --- /dev/null +++ b/.github/workflows/pkgdown.yaml @@ -0,0 +1,51 @@ +# Workflow derived from https://github.com/r-lib/actions/tree/v2/examples +# Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help +on: + push: + branches: [main, master] + pull_request: + branches: [main, master] + release: + types: [published] + workflow_dispatch: + +name: pkgdown + +jobs: + pkgdown: + runs-on: ubuntu-latest + # Only restrict concurrency for non-PR jobs + concurrency: + group: pkgdown-${{ github.event_name != 'pull_request' || github.run_id }} + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + permissions: + contents: write + steps: + - uses: actions/checkout@v4 + + - uses: r-lib/actions/setup-r@v2 + with: + use-public-rspm: true + install-pandoc: true + + - uses: r-lib/actions/setup-r-dependencies@v2 + with: + extra-packages: any::pkgdown, local::. + needs: website + + - uses: r-lib/actions/setup-pandoc@v2 + + - uses: r-lib/actions/setup-renv@v2 + + - name: Build site + run: pkgdown::build_site_github_pages(new_process = FALSE, install = TRUE) + shell: Rscript {0} + + - name: Deploy to GitHub pages 🚀 + if: github.event_name != 'pull_request' + uses: JamesIves/github-pages-deploy-action@v4.4.1 + with: + clean: false + branch: gh-pages + folder: docs diff --git a/.github/workflows/test-coverage.yaml b/.github/workflows/test-coverage.yaml new file mode 100644 index 0000000..7cb2dd5 --- /dev/null +++ b/.github/workflows/test-coverage.yaml @@ -0,0 +1,79 @@ +# Workflow derived from https://github.com/r-lib/actions/tree/v2/examples +# Need help debugging build failures? 
Start at https://github.com/r-lib/actions#where-to-find-help +on: + push: + branches: [main, devel] + pull_request: + branches: [main, devel] + +name: test-coverage + +jobs: + test-coverage: + runs-on: ubuntu-latest + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + POSTGRES_DB: postgres + POSTGRES_HOST: 127.0.0.1 + POSTGRES_PORT: 5432 + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + + services: + postgres: + image: ghcr.io/ttscience/postgres-temporal-tables/postgres-temporal-tables:latest + env: + POSTGRES_PASSWORD: postgres + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + - 5432:5432 + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 1 + + - uses: r-lib/actions/setup-r@v2 + with: + use-public-rspm: true + + - uses: r-lib/actions/setup-r-dependencies@v2 + with: + extra-packages: any::covr + needs: coverage + + - name: Install migrate + run: | + curl -L https://packagecloud.io/golang-migrate/migrate/gpgkey | \ + sudo apt-key add - && \ + echo "deb https://packagecloud.io/golang-migrate/migrate/ubuntu/ focal main" | \ + sudo tee /etc/apt/sources.list.d/migrate.list && \ + sudo apt-get update && \ + sudo apt-get install -y migrate + + - name: Test coverage + run: | + covr::codecov( + quiet = FALSE, + clean = FALSE, + install_path = file.path(normalizePath(Sys.getenv("RUNNER_TEMP"), winslash = "/"), "package") + ) + shell: Rscript {0} + + - name: Show testthat output + if: always() + run: | + ## -------------------------------------------------------------------- + find ${{ runner.temp }}/package -name 'testthat.Rout*' -exec cat '{}' \; || true + shell: bash + + - name: Upload test results + if: failure() + uses: actions/upload-artifact@v3 + with: + name: coverage-test-failures + path: ${{ runner.temp }}/package diff --git a/.gitignore b/.gitignore index e75435c..20e257d 100644 --- a/.gitignore +++ b/.gitignore @@ -47,3 +47,6 @@ po/*~ # RStudio Connect folder rsconnect/ +.Rproj.user +docs +inst/doc diff --git a/.lintr b/.lintr new file mode 100644 index 0000000..3377b64 --- /dev/null +++ b/.lintr @@ -0,0 +1,4 @@ +linters: linters_with_defaults( + line_length_linter = line_length_linter(120), + object_usage_linter = NULL + ) diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..668377d --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,5 @@ +{ + "files.watcherExclude": { + "**/renv/**": true + } +} \ No newline at end of file diff --git a/DESCRIPTION b/DESCRIPTION new file mode 100644 index 0000000..572c9ee --- /dev/null +++ b/DESCRIPTION @@ -0,0 +1,58 @@ +Package: unbiased +Title: Diverse Randomization Algorithms for Clinical Trials +Version: 1.0.0 +Authors@R: c( + person("Kamil", "Sijko", , "kamil.sijko@ttsi.com.pl", + role = c("aut", "cre"), comment = c(ORCID = "0000-0002-2203-1065")), + person("Kinga", "Sałata", , "kinga.salata@ttsi.com.pl", + role = c("aut")), + person("Aleksandra", "Duda", , "aleksandra.duda@ttsi.com.pl", + role = c("aut")), + person("Łukasz", "Wałejko", , "lukasz.walejko@ttsi.com.pl", + role = c("aut")), + person("Jagoda", "Głowacka-Walas", , "jagoda.glowacka-walas@ttsi.com.pl", + role = c("aut"), comment = c(ORCID = "0000-0002-7628-8691")), + person("Michał", "Seweryn", , "michal.seweryn@biol.uni.lodz.pl", + role = c("ctr"), comment = c(ORCID = "0000-0002-9090-3435")), + person("Transition Technologies Science Sp.
z o.o.", role = c("fnd", "cph")) + ) +Description: The Unbiased package offers a comprehensive suite of randomization + algorithms for clinical trials, encompassing dynamic strategies like the + minimization method, simple randomization approaches, and block randomization + techniques. Its primary purpose is to provide a harmonized set of functions that + will seamlessly integrate with a production-ready plumber API, also contained + within the package. This integration is designed to facilitate a smooth and + efficient interface with electronic Case Report Form (eCRF) systems, enhancing + the capability of clinical trials to manage patient allocation. +License: MIT + file LICENSE +Imports: + checkmate, + dbplyr, + plumber, + mathjaxr, + tibble, + tidyr, + dplyr, + rlang +Suggests: + callr, + httr2, + RPostgres, + pool, + testthat (>= 3.0.0), + usethis, + withr, + DBI, + glue, + jsonlite, + purrr, + knitr, + rmarkdown, + sentryR +RdMacros: mathjaxr +Config/testthat/edition: 3 +Encoding: UTF-8 +Roxygen: list(markdown = TRUE) +RoxygenNote: 7.3.1 +URL: https://ttscience.github.io/unbiased/ +VignetteBuilder: knitr diff --git a/Dockerfile b/Dockerfile index 5354402..1284b21 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM rocker/r-ver:4.3.1 +FROM rocker/r-ver:4.2.3 WORKDIR /src/unbiased @@ -7,21 +7,39 @@ RUN apt update && apt-get install -y --no-install-recommends \ # httpuv libz-dev \ # sodium - libsodium-dev + libsodium-dev \ + # RPostgres + libpq-dev libssl-dev postgresql-client \ + curl gnupg2 + +# Install database migration tool +RUN curl -L https://packagecloud.io/golang-migrate/migrate/gpgkey | apt-key add - && \ + echo "deb https://packagecloud.io/golang-migrate/migrate/ubuntu/ focal main" > /etc/apt/sources.list.d/migrate.list && \ + apt-get update && \ + apt-get install -y migrate ENV RENV_CONFIG_SANDBOX_ENABLED=FALSE COPY ./renv ./renv COPY .Rprofile . + +# Both renv.lock and DESCRIPTION are needed to restore the R environment COPY renv.lock . +COPY DESCRIPTION . RUN R -e 'renv::restore()' -COPY api/ ./api +COPY .Rbuildignore . +COPY NAMESPACE . +COPY inst/ ./inst +COPY R/ ./R +COPY tests/ ./inst/tests + +RUN R -e "devtools::install('.')" EXPOSE 3838 ARG github_sha ENV GITHUB_SHA=${github_sha} -CMD ["R", "-e", "plumber::plumb(dir = 'api') |> plumber::pr_run(host = '0.0.0.0', port = 3838)"] +CMD ["R", "-e", "unbiased::run_unbiased()"] \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..d9a4b92 --- /dev/null +++ b/LICENSE @@ -0,0 +1,2 @@ +YEAR: 2024 +COPYRIGHT HOLDER: Transition Technologies Science sp. z o.o. diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 0000000..063c07c --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,21 @@ +# MIT License + +Copyright (c) 2024 Transition Technologies Science sp. z o.o. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/NAMESPACE b/NAMESPACE new file mode 100644 index 0000000..18be837 --- /dev/null +++ b/NAMESPACE @@ -0,0 +1,9 @@ +# Generated by roxygen2: do not edit by hand + +export(create_db_connection_pool) +export(randomize_minimisation_pocock) +export(randomize_simple) +export(run_unbiased) +import(checkmate) +import(dplyr) +import(mathjaxr) diff --git a/NEWS.md b/NEWS.md new file mode 100644 index 0000000..0b60b14 --- /dev/null +++ b/NEWS.md @@ -0,0 +1,5 @@ +# unbiased (development version) + +* Initialized package structure. +* Implemented study definition endpoint (`POST /study`). +* Implemented study details endpoints (`GET /study`, `GET /study/`). diff --git a/R/api-audit-log.R b/R/api-audit-log.R new file mode 100644 index 0000000..cba00fd --- /dev/null +++ b/R/api-audit-log.R @@ -0,0 +1,27 @@ +api_get_audit_log <- function(study_id, req, res) { + audit_log_disable_for_request(req) + + if (!check_study_exist(study_id = study_id)) { + res$status <- 404 + return( + list(error = "Study not found") + ) + } + + # Get audit trial + audit_trail <- dplyr::tbl(db_connection_pool, "audit_log") |> + dplyr::filter(study_id == !!study_id) |> + dplyr::arrange(created_at) |> + dplyr::collect() + + audit_trail$request_body <- purrr::map( + audit_trail$request_body, + jsonlite::fromJSON + ) + audit_trail$response_body <- purrr::map( + audit_trail$response_body, + jsonlite::fromJSON + ) + + return(audit_trail) +} diff --git a/R/api_create_study.R b/R/api_create_study.R new file mode 100644 index 0000000..f225e88 --- /dev/null +++ b/R/api_create_study.R @@ -0,0 +1,150 @@ +api__minimization_pocock <- function( + # nolint: cyclocomp_linter. 
+ identifier, name, method, arms, covariates, p, req, res) { + audit_log_set_event_type("study_create", req) + + collection <- checkmate::makeAssertCollection() + + checkmate::assert( + checkmate::check_character(name, min.chars = 1, max.chars = 255), + .var.name = "name", + add = collection + ) + + checkmate::assert( + checkmate::check_character(identifier, min.chars = 1, max.chars = 12), + .var.name = "identifier", + add = collection + ) + + checkmate::assert( + checkmate::check_choice(method, choices = c("range", "var", "sd")), + .var.name = "method", + add = collection + ) + + checkmate::assert( + checkmate::check_list( + arms, + types = "integerish", + any.missing = FALSE, + min.len = 2, + names = "unique" + ), + .var.name = "arms", + add = collection + ) + + checkmate::assert( + checkmate::check_list( + covariates, + types = c("numeric", "list", "character"), + any.missing = FALSE, + min.len = 1, + names = "unique" + ), + .var.name = "covariates3", + add = collection + ) + + response <- list() + for (c_name in names(covariates)) { + c_content <- covariates[[c_name]] + + checkmate::assert( + checkmate::check_list( + c_content, + any.missing = FALSE, + len = 2, + ), + .var.name = "covariates1", + add = collection + ) + + checkmate::assert( + checkmate::check_names( + names(c_content), + permutation.of = c("weight", "levels"), + ), + .var.name = "covariates2", + add = collection + ) + + # check covariate weight + checkmate::assert( + checkmate::check_numeric(c_content$weight, + lower = 0, + finite = TRUE, + len = 1, + null.ok = FALSE + ), + .var.name = "weight", + add = collection + ) + + checkmate::assert( + checkmate::check_character(c_content$levels, + min.chars = 1, + min.len = 2, + unique = TRUE + ), + .var.name = "levels", + add = collection + ) + } + + # check probability + checkmate::assert( + checkmate::check_numeric(p, + lower = 0, upper = 1, len = 1, + any.missing = FALSE, null.ok = FALSE + ), + .var.name = "p", + add = collection + ) + + + if (length(collection$getMessages()) > 0) { + res$status <- 400 + return(list( + error = "There was a problem with the input data to create the study", + validation_errors = collection$getMessages() + )) + } + + similar_studies <- unbiased:::get_similar_studies(name, identifier) + + strata <- purrr::imap(covariates, function(covariate, name) { + list( + name = name, + levels = covariate$levels, + value_type = "factor" + ) + }) + weights <- lapply(covariates, function(covariate) covariate$weight) + + # Write study to DB ------------------------------------------------------- + r <- unbiased:::create_study( + name = name, + identifier = identifier, + method = "minimisation_pocock", + parameters = list( + method = method, + p = p, + weights = weights + ), + arms = arms, + strata = strata + ) + + audit_log_set_study_id(r$study$id, req) + + response <- list( + study = r$study + ) + if (nrow(similar_studies) >= 1) { + response <- c(response, list(similar_studies = similar_studies)) + } + + return(response) +} diff --git a/R/api_get_randomization_list.R b/R/api_get_randomization_list.R new file mode 100644 index 0000000..e2434b1 --- /dev/null +++ b/R/api_get_randomization_list.R @@ -0,0 +1,33 @@ +api_get_rand_list <- function(study_id, req, res) { + audit_log_set_event_type("get_rand_list", req) + db_connection_pool <- get("db_connection_pool") + + study_id <- req$args$study_id + + is_study <- check_study_exist(study_id = study_id) + + if (!is_study) { + res$status <- 404 + return(list( + error = "Study not found" + )) + } + 
audit_log_set_study_id(study_id, req) + + patients <- + dplyr::tbl(db_connection_pool, "patient") |> + dplyr::filter(study_id == !!study_id) |> + dplyr::left_join( + dplyr::tbl(db_connection_pool, "arm") |> + dplyr::select(arm_id = id, arm = name), + by = "arm_id" + ) |> + dplyr::select( + patient_id = id, arm, used, sys_period + ) |> + dplyr::collect() |> + dplyr::mutate(sys_period = as.character(gsub("\\[\"|\\+00\",\\)", "", sys_period))) |> + dplyr::mutate(sys_period = as.POSIXct(sys_period)) + + return(patients) +} diff --git a/R/api_get_study.R b/R/api_get_study.R new file mode 100644 index 0000000..2317db2 --- /dev/null +++ b/R/api_get_study.R @@ -0,0 +1,97 @@ +api_get_study <- function(req, res) { + audit_log_disable_for_request(req) + db_connection_pool <- get("db_connection_pool") + + study_list <- + dplyr::tbl(db_connection_pool, "study") |> + dplyr::select(study_id = id, identifier, name, method, last_edited = timestamp) |> + dplyr::collect() |> + tibble::as_tibble() + + return(study_list) +} + +api_get_study_records <- function(study_id, req, res) { + audit_log_set_event_type("get_study_record", req) + db_connection_pool <- get("db_connection_pool") + + study_id <- req$args$study_id + + if (!check_study_exist(study_id)) { + res$status <- 404 + return(list( + error = "Study not found" + )) + } + audit_log_set_study_id(study_id, req) + + study <- + dplyr::tbl(db_connection_pool, "study") |> + dplyr::filter(id == !!study_id) |> + dplyr::select( + study_id = id, name, randomization_method = method, + last_edited = timestamp, parameters + ) |> + dplyr::collect() |> + tibble::remove_rownames() + + strata <- + dplyr::tbl(db_connection_pool, "stratum") |> + dplyr::filter(study_id == !!study_id) |> + dplyr::select(stratum_id = id, stratum_name = name, value_type) |> + collect() |> + left_join( + bind_rows( + dplyr::tbl(db_connection_pool, "factor_constraint") |> + dplyr::collect(), + dplyr::tbl(db_connection_pool, "numeric_constraint") |> + dplyr::collect() + ), + by = "stratum_id" + ) |> + tidyr::unite("value_num", c("min_value", "max_value"), + sep = " - ", na.rm = TRUE + ) |> + dplyr::mutate(value = ifelse(is.na(value), value_num, value)) |> + dplyr::select(stratum_name, value_type, value) |> + left_join( + study$parameters |> + jsonlite::fromJSON() |> + purrr::flatten_dfr() |> + select(-c(p, method)) |> + tidyr::pivot_longer( + cols = everything(), + names_to = "stratum_name", + values_to = "weight" + ), + by = "stratum_name" + ) |> + group_by(stratum_name, value_type, weight) |> + summarise(levels = list(value)) + + arms <- + dplyr::tbl(db_connection_pool, "arm") |> + dplyr::filter(study_id == !!study_id) |> + dplyr::select(arm_name = name, ratio) |> + dplyr::collect() |> + tidyr::pivot_wider(names_from = arm_name, values_from = ratio) |> + as.list() + + study_elements <- + list( + strata = strata, + arms = arms + ) + + study_list <- c( + study |> + dplyr::select(-parameters), + study$parameters |> + jsonlite::fromJSON() |> + purrr::flatten_dfr() |> + dplyr::select(p, method), + study_elements + ) + + return(study_list) +} diff --git a/R/api_randomize.R b/R/api_randomize.R new file mode 100644 index 0000000..18db0f3 --- /dev/null +++ b/R/api_randomize.R @@ -0,0 +1,134 @@ +parse_pocock_parameters <- + function(db_connetion_pool, study_id, current_state) { + parameters <- + dplyr::tbl(db_connetion_pool, "study") |> + dplyr::filter(id == study_id) |> + dplyr::select(parameters) |> + dplyr::pull() + + parameters <- jsonlite::fromJSON(parameters) + + ratio_arms <- + 
dplyr::tbl(db_connetion_pool, "arm") |> + dplyr::filter(study_id == !!study_id) |> + dplyr::select(name, ratio) |> + dplyr::collect() + + params <- list( + arms = ratio_arms$name, + current_state = tibble::as_tibble(current_state), + ratio = setNames(ratio_arms$ratio, ratio_arms$name), + method = parameters$method, + p = parameters$p, + weights = parameters$weights |> unlist() + ) + + return(params) + } + +api__randomize_patient <- function(study_id, current_state, req, res) { + audit_log_set_event_type("randomize_patient", req) + collection <- checkmate::makeAssertCollection() + + db_connection_pool <- get("db_connection_pool") + + study_id <- req$args$study_id + + if (!check_study_exist(study_id)) { + res$status <- 404 + return(list( + error = "Study not found" + )) + } + + audit_log_set_study_id(study_id, req) + + # Retrieve study details, especially the ones about randomization + method_randomization <- + dplyr::tbl(db_connection_pool, "study") |> + dplyr::filter(id == study_id) |> + dplyr::select("method") |> + dplyr::pull() + + checkmate::assert( + checkmate::check_scalar(method_randomization, null.ok = FALSE), + .var.name = "method_randomization", + add = collection + ) + + checkmate::assert( + checkmate::check_data_frame(current_state, + any.missing = TRUE, + all.missing = FALSE, nrows = 2, ncols = 3 + ), + .var.name = "current_state", + add = collection + ) + + checkmate::assert( + checkmate::check_names( + colnames(current_state), + must.include = "arm" + ), + .var.name = "current_state", + add = collection + ) + + + check_arm <- function(x) { + res <- checkmate::check_character( + current_state$arm[nrow(current_state)], + max.chars = 0 + ) + if (!isTRUE(res)) { + res <- ("Last value should be empty") + } + return(res) + } + + checkmate::assert( + check_arm(), + .var.name = "current_state[arm]", + add = collection + ) + + if (length(collection$getMessages()) > 0) { + res$status <- 400 + return(list( + error = "There was a problem with the randomization preparation", + validation_errors = collection$getMessages() + )) + } + + # Dispatch based on randomization method to parse parameters + params <- + switch(method_randomization, + minimisation_pocock = do.call( + parse_pocock_parameters, list(db_connection_pool, study_id, current_state) + ) + ) + + arm_name <- + switch(method_randomization, + minimisation_pocock = do.call( + unbiased:::randomize_minimisation_pocock, params + ) + ) + + arm <- dplyr::tbl(db_connection_pool, "arm") |> + dplyr::filter(study_id == !!study_id & .data$name == arm_name) |> + dplyr::select("arm_id" = "id", "name", "ratio") |> + dplyr::collect() + + randomized_patient <- + unbiased:::save_patient(study_id, arm$arm_id, used = TRUE) |> + select(-used) + + randomized_patient <- + randomized_patient |> + dplyr::mutate(arm_name = arm$name) |> + dplyr::rename(patient_id = id) |> + as.list() + + return(randomized_patient) +} diff --git a/R/audit-trail.R b/R/audit-trail.R new file mode 100644 index 0000000..e6932a4 --- /dev/null +++ b/R/audit-trail.R @@ -0,0 +1,210 @@ +#' AuditLog Class +#' +#' This class is used internally to store audit logs for each request. +AuditLog <- R6::R6Class( # nolint: object_name_linter. 
+ "AuditLog", + public = list( + initialize = function(request_method, endpoint_url) { + private$request_id <- uuid::UUIDgenerate() + private$request_method <- request_method + private$endpoint_url <- endpoint_url + }, + disable = function() { + private$disabled <- TRUE + }, + is_enabled = function() { + !private$disabled + }, + set_request_body = function(request_body) { + if (typeof(request_body) == "list") { + request_body <- jsonlite::toJSON(request_body, auto_unbox = TRUE) |> as.character() + } else if (!is.character(request_body)) { + request_body <- NA + } + private$request_body <- request_body + }, + set_response_body = function(response_body) { + checkmate::assert_false( + typeof(response_body) == "list" + ) + private$response_body <- response_body + }, + set_ip_address = function(ip_address) { + private$ip_address <- ip_address + }, + set_user_agent = function(user_agent) { + private$user_agent <- user_agent + }, + set_event_type = function(event_type) { + private$event_type <- event_type + }, + set_study_id = function(study_id) { + private$study_id <- study_id + }, + set_response_code = function(response_code) { + private$response_code <- response_code + }, + validate_log = function() { + checkmate::assert( + !private$disabled + ) + if (is.null(private$event_type)) { + if (private$response_code == 404) { + # "soft" validation failure for 404 errors + # it might be just invalid endpoint + # so we don't want to fail the request + return(FALSE) + } else { + stop("Event type not set for audit log. Please set the event type using `audit_log_event_type`") + } + } + return(TRUE) + }, + persist = function() { + checkmate::assert( + !private$disabled + ) + db_conn <- pool::localCheckout(db_connection_pool) + values <- list( + private$request_id, + private$event_type, + private$study_id, + private$endpoint_url, + private$request_method, + private$request_body, + private$response_code, + private$response_body, + private$ip_address, + private$user_agent + ) + + values <- purrr::map(values, \(x) ifelse(is.null(x), NA, x)) + + DBI::dbGetQuery( + db_conn, + "INSERT INTO audit_log ( + request_id, + event_type, + study_id, + endpoint_url, + request_method, + request_body, + response_code, + response_body, + ip_address, + user_agent + ) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)", + values + ) + } + ), + private = list( + disabled = FALSE, + request_id = NULL, + event_type = NULL, + study_id = NULL, + endpoint_url = NULL, + request_method = NULL, + response_code = NULL, + request_body = NULL, + response_body = NULL, + ip_address = NULL, + user_agent = NULL + ) +) + + +#' Set up audit trail +#' +#' This function sets up an audit trail for a given process. It uses plumber's hooks to log +#' information before routing (preroute) and after serializing the response (postserialize). +#' +#' This function modifies the plumber router in place and returns the updated router. +#' +#' @param pr A plumber router for which the audit trail is to be set up. +#' @param endpoints A list of regex patterns for which the audit trail should be enabled. +#' @return Returns the updated plumber router with the audit trail hooks. 
+#' @examples +#' pr <- plumber::plumb("your-api-definition.R") |> +#' setup_audit_trail() +setup_audit_trail <- function(pr, endpoints = list()) { + checkmate::assert_list(endpoints, types = "character") + is_enabled_for_request <- function(req) { + any(sapply(endpoints, \(endpoint) grepl(endpoint, req$PATH_INFO))) + } + + hooks <- list( + preroute = function(req, res) { + with_err_handler({ + if (!is_enabled_for_request(req)) { + return() + } + audit_log <- AuditLog$new( + request_method = req$REQUEST_METHOD, + endpoint_url = req$PATH_INFO + ) + req$.internal.audit_log <- audit_log + }) + }, + postserialize = function(req, res) { + with_err_handler({ + audit_log <- req$.internal.audit_log + if (is.null(audit_log) || !audit_log$is_enabled()) { + return() + } + audit_log$set_response_code(res$status) + audit_log$set_request_body(req$body) + audit_log$set_response_body(res$body) + audit_log$set_ip_address(req$REMOTE_ADDR) + audit_log$set_user_agent(req$HTTP_USER_AGENT) + + log_valid <- audit_log$validate_log() + + if (log_valid) { + audit_log$persist() + } + }) + } + ) + pr |> + plumber::pr_hooks(hooks) +} + +#' Set Audit Log Event Type +#' +#' This function sets the event type for an audit log. It retrieves the audit log from the request's +#' internal data, and then calls the audit log's set_event_type method with the provided event type. +#' +#' @param event_type The event type to be set for the audit log. +#' @param req The request object, which should contain an audit log in its internal data. +#' @return Returns nothing as it modifies the audit log in-place. +audit_log_set_event_type <- function(event_type, req) { + audit_log <- req$.internal.audit_log + if (!is.null(audit_log)) { + audit_log$set_event_type(event_type) + } +} + +#' Set Audit Log Study ID +#' +#' This function sets the study ID for an audit log. It retrieves the audit log from the request's +#' internal data, and then calls the audit log's set_study_id method with the provided study ID. +#' +#' @param study_id The study ID to be set for the audit log. +#' @param req The request object, which should contain an audit log in its internal data. +#' @return Returns nothing as it modifies the audit log in-place. +audit_log_set_study_id <- function(study_id, req) { + checkmate::assert(!is.null(study_id) && is.numeric(study_id), "Study ID must be a number") + audit_log <- req$.internal.audit_log + if (!is.null(audit_log)) { + audit_log$set_study_id(study_id) + } +} + +audit_log_disable_for_request <- function(req) { + audit_log <- req$.internal.audit_log + if (!is.null(audit_log)) { + audit_log$disable() + } +} diff --git a/R/db.R b/R/db.R new file mode 100644 index 0000000..7f53acb --- /dev/null +++ b/R/db.R @@ -0,0 +1,159 @@ +#' Defines methods for interacting with the study in the database + +#' Create a database connection pool +#' +#' This function creates a connection pool to a PostgreSQL database. It uses +#' environment variables to get the necessary connection parameters. If the +#' connection fails, it will retry up to 5 times with a delay of 2 seconds +#' between each attempt. +#' +#' @return A pool object representing the connection pool to the database. 
+#' @export +#' +#' @examples +#' \dontrun{ +#' pool <- create_db_connection_pool() +#' } +create_db_connection_pool <- purrr::insistently(function() { + dbname <- Sys.getenv("POSTGRES_DB") + host <- Sys.getenv("POSTGRES_HOST") + port <- Sys.getenv("POSTGRES_PORT", 5432) + user <- Sys.getenv("POSTGRES_USER") + password <- Sys.getenv("POSTGRES_PASSWORD") + print( + glue::glue("Creating database connection pool to {dbname} at {host}:{port} as {user}") + ) + pool::dbPool( + RPostgres::Postgres(), + dbname = dbname, + host = host, + port = port, + user = user, + password = password + ) +}, rate = purrr::rate_delay(1, max_times = 15), quiet = FALSE) + + +get_similar_studies <- function(name, identifier) { + db_connection_pool <- get("db_connection_pool") + similar <- + dplyr::tbl(db_connection_pool, "study") |> + dplyr::select(id, name, identifier) |> + dplyr::filter(name == !!name | identifier == !!identifier) |> + dplyr::collect() + similar +} + +check_study_exist <- function(study_id) { + db_connection_pool <- get("db_connection_pool") + study_exists <- dplyr::tbl(db_connection_pool, "study") |> + dplyr::filter(id == !!study_id) |> + dplyr::collect() |> + nrow() > 0 + study_exists +} + +create_study <- function( + name, identifier, method, parameters, arms, strata) { + db_connection_pool <- get("db_connection_pool", envir = .GlobalEnv) + connection <- pool::localCheckout(db_connection_pool) + + DBI::dbWithTransaction( + connection, + { + study_record <- list( + name = name, + identifier = identifier, + method = method, + parameters = jsonlite::toJSON(parameters, auto_unbox = TRUE) + |> as.character() + ) + + study <- DBI::dbGetQuery( + connection, + "INSERT INTO study (name, identifier, method, parameters) + VALUES ($1, $2, $3, $4) + RETURNING id, name, identifier, method, parameters", + unname(study_record) + ) + + study <- as.list(study) + study$parameters <- jsonlite::fromJSON(study$parameters) + + arm_records <- arms |> + purrr::imap(\(x, name) list(name = name, ratio = x)) |> + purrr::map(tibble::as_tibble) |> + purrr::list_c() + arm_records$study_id <- study$id + + DBI::dbWriteTable( + connection, + "arm", + arm_records, + append = TRUE, + row.names = FALSE + ) + + created_arms <- DBI::dbGetQuery( + connection, + "SELECT id, study_id, name, ratio + FROM arm + WHERE study_id = $1", + study$id + ) + + study$arms <- created_arms + + stratum_records <- strata |> + purrr::imap(\(x, name) list(name = name, value_type = x$value_type)) |> + purrr::map(tibble::as_tibble) |> + purrr::list_c() + stratum_records$study_id <- study$id + + DBI::dbWriteTable( + connection, + "stratum", + stratum_records, + append = TRUE, + row.names = FALSE + ) + + created_strata <- DBI::dbGetQuery( + connection, + "SELECT id, study_id, name, value_type + FROM stratum + WHERE study_id = $1", + study$id + ) + + factor_constraints <- strata |> + purrr::imap(\(x, name) tibble::as_tibble(x)) |> + purrr::list_c() |> + dplyr::filter(.data$value_type == "factor") |> + dplyr::select(name, levels) |> + dplyr::left_join(created_strata, dplyr::join_by("name")) |> + dplyr::select(id, levels) |> + dplyr::rename(value = levels, stratum_id = id) + + DBI::dbWriteTable( + connection, + "factor_constraint", + factor_constraints, + append = TRUE, + row.names = FALSE + ) + + list(study = study) + } + ) +} + +save_patient <- function(study_id, arm_id, used) { + DBI::dbGetQuery( + db_connection_pool, + "INSERT INTO patient (arm_id, study_id, used) + VALUES ($1, $2, $3) + RETURNING id, arm_id, used", + list(arm_id, study_id, used) + ) +} 
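For reference, `create_study()` defined above is the routine that the study-creation handler `api__minimization_pocock()` (in `R/api_create_study.R` earlier in this changeset) invokes once the request payload has been validated. A minimal sketch of such a call is shown below; the study and covariate values are illustrative only, and a working `db_connection_pool` (created with `create_db_connection_pool()`) is assumed to exist in the global environment.

```R
# Illustrative sketch only: argument shapes mirror api__minimization_pocock();
# the values are hypothetical and a live PostgreSQL connection pool is assumed.
unbiased:::create_study(
  name = "Study 1",
  identifier = "My_study_1",
  method = "minimisation_pocock",
  parameters = list(
    method = "var",                   # imbalance measure: "range", "var" or "sd"
    p = 0.85,                         # probability of picking the preferred arm
    weights = list(sex = 1, age = 1)  # one weight per covariate
  ),
  arms = list(placebo = 1, treatment = 1),
  strata = list(
    sex = list(name = "sex", levels = c("female", "male"), value_type = "factor"),
    age = list(name = "age", levels = c("up to 50", "51 or more"), value_type = "factor")
  )
)
```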
diff --git a/R/error-handling.R b/R/error-handling.R new file mode 100644 index 0000000..3b8cea6 --- /dev/null +++ b/R/error-handling.R @@ -0,0 +1,121 @@ +# hack to make sure we can mock the globalCallingHandlers +# this method needs to be present in the package environment for mocking to work +# linter disabled intentionally since this is internal method and cannot be renamed +globalCallingHandlers <- NULL # nolint + +#' setup_sentry function +#' +#' This function is used to configure Sentry, a service for real-time error tracking. +#' It uses the sentryR package to set up Sentry based on environment variables. +#' +#' @param None +#' +#' @return None. If the SENTRY_DSN environment variable is not set, the function will +#' return a message and stop execution. +#' +#' @examples +#' setup_sentry() +#' +#' @details +#' The function first checks if the SENTRY_DSN environment variable is set. If not, it +#' returns a message and stops execution. +#' If SENTRY_DSN is set, it uses the sentryR::configure_sentry function to set up Sentry with +#' the following parameters: +#' - dsn: The Data Source Name (DSN) is retrieved from the SENTRY_DSN environment variable. +#' - app_name: The application name is set to "unbiased". +#' - app_version: The application version is retrieved from the GITHUB_SHA environment variable. +#' If not set, it defaults to "unspecified". +#' - environment: The environment is retrieved from the SENTRY_ENVIRONMENT environment variable. +#' If not set, it defaults to "development". +#' - release: The release is retrieved from the SENTRY_RELEASE environment variable. +#' If not set, it defaults to "unspecified". +#' +#' @seealso \url{https://docs.sentry.io/} +setup_sentry <- function() { + sentry_dsn <- Sys.getenv("SENTRY_DSN") + if (sentry_dsn == "") { + message("SENTRY_DSN not set, skipping Sentry setup") + return() + } + + sentryR::configure_sentry( + dsn = sentry_dsn, + app_name = "unbiased", + app_version = Sys.getenv("GITHUB_SHA", "unspecified"), + environment = Sys.getenv("SENTRY_ENVIRONMENT", "development"), + release = Sys.getenv("SENTRY_RELEASE", "unspecified") + ) + + globalCallingHandlers( + error = global_calling_handler + ) +} + +global_calling_handler <- function(error) { + error$function_calls <- sys.calls() + sentryR::capture_exception(error) + signalCondition(error) +} + +wrap_endpoint <- function(z) { + f <- function(...) 
{ + return(withCallingHandlers(z(...), error = rlang::entrace)) + } + return(f) +} + +setup_invalid_json_handler <- function(api) { + api |> + plumber::pr_filter("validate_input_json", \(req, res) { + if (length(req$bodyRaw) > 0) { + request_body <- req$bodyRaw |> rawToChar() + e <- tryCatch( + { + jsonlite::fromJSON(request_body) + NULL + }, + error = \(e) e + ) + if (!is.null(e)) { + print(glue::glue("Invalid JSON; requested endpoint: {req$PATH_INFO}")) + audit_log_set_event_type("malformed_request", req) + res$status <- 400 + return(list( + error = jsonlite::unbox("Invalid JSON"), + details = e$message |> strsplit("\n") |> unlist() + )) + } + } + + plumber::forward() + }) +} + +# nocov start +default_error_handler <- function(req, res, error) { + print(error, simplify = "branch") + + if (sentryR::is_sentry_configured()) { + if ("trace" %in% names(error)) { + error$function_calls <- error$trace$call + } else if (!("function_calls" %in% names(error))) { + error$function_calls <- sys.calls() + } + + sentryR::capture_exception(error) + } + + res$status <- 500 + + list( + error = "500 - Internal server error" + ) +} +# nocov end + +with_err_handler <- function(expr) { + withCallingHandlers( + expr = expr, + error = rlang::entrace, bottom = rlang::caller_env() + ) +} diff --git a/R/randomize-minimisation-pocock.R b/R/randomize-minimisation-pocock.R new file mode 100644 index 0000000..1f01816 --- /dev/null +++ b/R/randomize-minimisation-pocock.R @@ -0,0 +1,293 @@ +#' Compare rows of two dataframes +#' +#' Takes the dataframe all_patients (one row per already randomized patient) +#' and compares each of its rows to new_patients (a single-row dataframe +#' describing the newly randomized patient) +#' +#' @param all_patients data.frame with all randomized patients (one row per patient) +#' @param new_patients single-row data.frame with the new patient +#' +#' @return data.frame with columns as in all_patients and new_patients, +#' filled with TRUE if there is a match in the covariate and FALSE if not +compare_rows <- function(all_patients, new_patients) { + # Find common column names + common_cols <- intersect(names(all_patients), names(new_patients)) + + # Compare each common column of A with B + comparisons <- lapply(common_cols, function(col) { + all_patients[[col]] == new_patients[[col]] + }) + + # Combine the comparisons into a new dataframe + comparison_df <- data.frame(comparisons) + names(comparison_df) <- common_cols + tibble::as_tibble(comparison_df) +} + + + +#' Patient Randomization Using Minimization Method +#' +#' \loadmathjax +#' The `randomize_minimisation_pocock` function implements the dynamic randomization +#' algorithm using the minimization method proposed by Pocock (Pocock and Simon, +#' 1975). It requires defining basic study parameters: the number of arms (K), +#' number of covariates (C), patient allocation ratios (\(a_{k}\)) +#' (where k = 1,2,..., K), weights for the covariates (\(w_{i}\)) +#' (where i = 1,2,..., C), and the maximum probability (p) of assigning a patient +#' to the group with the smallest total imbalance multiplied by +#' the respective weights (\(G_{k}\)). As the total imbalance for the first +#' patient is the same regardless of the assigned arm, this patient is randomly +#' allocated to a given arm. Subsequent patients are randomized based on the +#' calculation of the imbalance depending on the selected method: "range", +#' "var" (variance), or "sd" (standard deviation). In the case of two arms, +#' the "range" method is equivalent to the "sd" method. +#' +#' Initially, the algorithm creates a matrix of results comparing a newly +#' randomized patient with the current balance of patients based on the defined +#' covariates. In the next step, for each arm and specified covariate, +#' various scenarios of patient allocation are calculated. The existing results +#' (n) are updated with the new patient, and then, considering the ratio +#' coefficients, the results are divided by the specific allocation ratio. +#' Depending on the method, the total imbalance is then calculated, +#' taking into account the weights and the number of covariates, using one +#' of three methods (“sd”, “range”, “var”). +#' Based on the number of defined arms, the minimum value of (\(G_{k}\)) +#' (defined as the weighted sum of the level-based imbalance) selects the arm to +#' which the patient will be assigned with a predefined probability (p). The +#' probability that a patient will be assigned to any other arm will then be +#' equal to (1-p)/(K-1) +#' for each of the remaining arms. + +#' @references Pocock, S. J., & Simon, R. (1975). Minimization: A new method +#' of assigning patients to treatment and control groups in clinical trials. +#' @references Minirand Package: Man Jin, Adam Polis, Jonathan Hartzel. +#' (https://CRAN.R-project.org/package=Minirand) +#' @note This function's implementation is a refactored adaptation +#' of the codebase from the 'Minirand' package.
+#' +#' @inheritParams randomize_simple +#' +#' @param current_state `tibble()`\cr +#' table of covariates and current arm assignments in column `arm`, +#' last row contains the new patient with empty string for `arm` +#' @param weights `numeric()`\cr +#' vector of positive weights, equal in length to number of covariates, +#' numbered after covariates, defaults to equal weights +#' @param method `character()`\cr +#' Function used to compute within-arm variability, must be one of: +#' `sd`, `var`, `range`, defaults to `var` +#' @param p `numeric()`\cr +#' single value, proportion of randomness (0, 1) in the randomization +#' vs determinism, defaults to 85% deterministic +#' +#' @return `character()`\cr +#' name of the arm assigned to the patient +#' @examples +#' n_at_the_moment <- 10 +#' arms <- c("control", "active low", "active high") +#' sex <- sample(c("F", "M"), +#' n_at_the_moment + 1, +#' replace = TRUE, +#' prob = c(0.4, 0.6) +#' ) +#' diabetes <- +#' sample(c("diabetes", "no diabetes"), +#' n_at_the_moment + 1, +#' replace = TRUE, +#' prob = c(0.2, 0.8) +#' ) +#' arm <- +#' sample(arms, +#' n_at_the_moment, +#' replace = TRUE, +#' prob = c(0.4, 0.4, 0.2) +#' ) |> +#' c("") +#' covar_df <- tibble::tibble(sex, diabetes, arm) +#' covar_df +#' +#' randomize_minimisation_pocock(arms = arms, current_state = covar_df) +#' randomize_minimisation_pocock( +#' arms = arms, current_state = covar_df, +#' ratio = c( +#' "control" = 1, +#' "active low" = 2, +#' "active high" = 2 +#' ), +#' weights = c( +#' "sex" = 0.5, +#' "diabetes" = 1 +#' ) +#' ) +#' +#' @export +randomize_minimisation_pocock <- + function(arms, + current_state, + weights, + ratio, + method = "var", + p = 0.85) { + # Assertions + checkmate::assert_character( + arms, + min.len = 2, + min.chars = 1, + unique = TRUE + ) + + # Define a custom range function + custom_range <- function(x) { + max(x, na.rm = TRUE) - min(x, na.rm = TRUE) + } + + supported_methods <- list( + "range" = custom_range, + "var" = var, + "sd" = sd + ) + + checkmate::assert_choice( + method, + choices = names(supported_methods), + ) + checkmate::assert_tibble( + current_state, + any.missing = FALSE, + min.cols = 2, + min.rows = 1, + null.ok = FALSE + ) + checkmate::assert_names( + colnames(current_state), + must.include = "arm" + ) + checkmate::assert_character( + current_state$arm[nrow(current_state)], + max.chars = 0, .var.name = "Last value of 'arm'" + ) + + n_covariates <- + (ncol(current_state) - 1) + n_arms <- + length(arms) + + checkmate::assert_subset( + unique(current_state$arm), + choices = c(arms, ""), + .var.name = "'arm' variable in dataframe" + ) + # Validate argument presence and revert to defaults if not provided + if (rlang::is_missing(ratio)) { + ratio <- rep(1L, n_arms) + names(ratio) <- arms + } + if (rlang::is_missing(weights)) { + weights <- rep(1 / n_covariates, n_covariates) + names(weights) <- + colnames(current_state)[colnames(current_state) != "arm"] + } + + checkmate::assert_numeric( + weights, + any.missing = FALSE, + len = n_covariates, + null.ok = FALSE, + lower = 0, + finite = TRUE, + all.missing = FALSE + ) + checkmate::assert_names( + names(weights), + must.include = + colnames(current_state)[colnames(current_state) != "arm"] + ) + checkmate::assert_integerish( + ratio, + any.missing = FALSE, + len = n_arms, + null.ok = FALSE, + lower = 0, + all.missing = FALSE, + names = "named" + ) + checkmate::assert_names( + names(ratio), + must.include = arms + ) + checkmate::assert_number( + p, + na.ok = FALSE, + lower = 0, + upper = 
1, + null.ok = FALSE + ) + + # Computations + n_at_the_moment <- nrow(current_state) - 1 + + if (n_at_the_moment == 0) { + return(randomize_simple(arms, ratio)) + } + + arms_similarity <- + # compare new subject to all old subjects + compare_rows( + current_state[-nrow(current_state), names(current_state) != "arm"], + current_state[nrow(current_state), names(current_state) != "arm"] + ) |> + split(current_state$arm[1:n_at_the_moment]) |> # split by arm + lapply(colSums) |> # and compute number of similarities in each arm + dplyr::bind_rows(.id = "arm") |> + # make sure that every arm has a metric, even if not present in data yet + tidyr::complete(arm = arms) |> + dplyr::mutate(dplyr::across( + dplyr::where(is.numeric), + ~ tidyr::replace_na(.x, 0) + )) + + imbalance <- sapply(arms, function(x) { + arms_similarity |> + # compute scenario where each arm (x) gets new subject + dplyr::mutate(dplyr::across( + dplyr::where(is.numeric), + ~ dplyr::if_else(arm == x, .x + 1, .x) * + ratio[arm] + )) |> + # compute dispersion across each covariate + dplyr::summarise(dplyr::across( + dplyr::where(is.numeric), + ~ supported_methods[[method]](.x) + )) |> + # multiply each covariate dispersion by covariate weight + dplyr::mutate(dplyr::across( + dplyr::everything(), + ~ . * weights[dplyr::cur_column()] + )) |> + # sum all covariate outcomes + dplyr::summarize(total = sum(dplyr::c_across(dplyr::everything()))) |> + dplyr::pull("total") + }) + + high_prob_arms <- names(which(imbalance == min(imbalance))) + low_prob_arms <- arms[!arms %in% high_prob_arms] + + if (length(high_prob_arms) == n_arms) { + return(randomize_simple(arms, ratio)) + } + + sample( + c(high_prob_arms, low_prob_arms), 1, + prob = c( + rep( + p / length(high_prob_arms), + length(high_prob_arms) + ), + rep( + (1 - p) / length(low_prob_arms), + length(low_prob_arms) + ) + ) + ) + } diff --git a/R/randomize-simple.R b/R/randomize-simple.R new file mode 100644 index 0000000..a8b558a --- /dev/null +++ b/R/randomize-simple.R @@ -0,0 +1,47 @@ +#' Simple randomization +#' +#' @description +#' Randomly assigns a patient to one of the arms according to specified ratios, +#' regardless of already performed assignments. +#' +#' @param arms `character()`\cr +#' Arm names. +#' @param ratio `integer()`\cr +#' Vector of positive integers (0 is allowed), equal in length to number +#' of arms, named after arms, defaults to equal weight +#' +#' @return Selected arm assignment. +#' +#' @examples +#' randomize_simple(c("active", "placebo"), c("active" = 2, "placebo" = 1)) +#' +#' @export +randomize_simple <- function(arms, ratio) { + # Validate argument presence and revert to defaults if not provided + if (rlang::is_missing(ratio)) { + ratio <- rep(1L, rep(length(arms))) + names(ratio) <- arms + } + + # Argument assertions + checkmate::assert_character( + arms, + any.missing = FALSE, + unique = TRUE, + min.chars = 1 + ) + + checkmate::assert_integerish( + ratio, + any.missing = FALSE, + lower = 0, + len = length(arms), + names = "named" + ) + checkmate::assert_names( + names(ratio), + must.include = arms + ) + + sample(arms, 1, prob = ratio[arms]) +} diff --git a/R/run-api.R b/R/run-api.R new file mode 100644 index 0000000..94d1993 --- /dev/null +++ b/R/run-api.R @@ -0,0 +1,45 @@ +#' Run API +#' +#' @description +#' Starts \pkg{unbiased} API. +#' +#' @param host `character(1)`\cr +#' Host URL. +#' @param port `integer(1)`\cr +#' Port to serve API under. +#' +#' @return Function called to serve the API in the caller thread. 
+#' +#' @export +run_unbiased <- function() { + setup_sentry() + host <- Sys.getenv("UNBIASED_HOST", "0.0.0.0") + port <- as.integer(Sys.getenv("UNBIASED_PORT", "3838")) + assign("db_connection_pool", + unbiased:::create_db_connection_pool(), + envir = globalenv() + ) + + on.exit({ + db_connection_pool <- get("db_connection_pool", envir = globalenv()) + pool::poolClose(db_connection_pool) + assign("db_connection_pool", NULL, envir = globalenv()) + }) + + # if "inst" directory is not present, we assume that the package is installed + # and inst directory content is copied to the root directory + # so we can use plumb_api method + if (!dir.exists("inst")) { + plumber::plumb_api("unbiased", "unbiased_api") |> + plumber::pr_run(host = host, port = port) + } else { + # otherwise we assume that we are in the root directory of the repository + # and we can use plumb method to run the API from the plumber.R file + + # Following line is excluded from code coverage because it is not possible to + # run the API from the plumber.R file in the test environment + # This branch is only used for local development + plumber::plumb("./inst/plumber/unbiased_api/plumber.R") |> # nocov start + plumber::pr_run(host = host, port = port) # nocov end + } +} diff --git a/R/unbiased-package.R b/R/unbiased-package.R new file mode 100644 index 0000000..8e6f8db --- /dev/null +++ b/R/unbiased-package.R @@ -0,0 +1,10 @@ +#' @import checkmate +#' @import dplyr +#' @import mathjaxr +#' +#' @keywords internal +"_PACKAGE" + +## usethis namespace: start +## usethis namespace: end +NULL diff --git a/README.md b/README.md index 1dc73f6..0f4c5a7 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,229 @@ -# unbiased -API for clinical trial randomization +# **unbiased**: An API-based solution for Clinical Trial Randomization + +In clinical trials, the fair and efficient allocation of participants is essential for achieving reliable results. While there are many excellent R randomization packages available, none, to our knowledge, provide a dedicated API for this purpose. The **unbiased** package fills this gap by featuring a production-ready REST API designed for seamless integration. This unique combination allows for easy connection with electronic Case Report Forms (eCRF), enhancing data management and streamlining participant allocation. + +## Why choose **unbiased**? + +Our goal in creating **unbiased** was to provide a user-friendly yet powerful tool that addresses the nuanced demands of clinical trial randomization. It offers: + +- **Production-Ready REST API**: Built for seamless integration with eCRF/EDC systems, facilitating real-time randomization and automation. +- **Extensive Database Integration**: Supports robust data management practices, ensuring that participant information and randomization outcomes are securely managed and easily accessible. +- **Commitment to Quality**: Emphasizes development best practices, including comprehensive code coverage, to deliver a reliable and trustworthy solution. +- **Adaptability**: Whether for small-scale studies or large, multi-center trials, **unbiased** scales to meet your needs. +- **Comprehensive Documentation**: To support you in applying the package effectively. + +By choosing **unbiased**, you're adopting a sophisticated approach to trial randomization, ensuring fair and efficient participant allocation across your studies and support of the broader objectives of clinical research through technology. + +## Table of Contents + +1. 
[Background](#background) + - [Purpose and Scope for Clinical Trial Randomization](#purpose-and-scope-for-clinical-trial-randomization) + - [Comparison with Other Solutions](#comparison-with-other-solutions) +2. [Quickstart Guide](#quickstart-guide) + - [Quick Setup with Docker](#quick-setup-with-docker) + - [API Configuration](#api-configuration) + - [Alternative Installation Method](#alternative-installation-method) +3. [Getting started with **unbiased**](#getting-started-with-unbiased) + - [API Endpoints](#api-endpoints) + - [Study Creation](#study-creation) + - [Patient Randomization](#patient-randomization) +4. [Technical Implementation](#technical-implementation) + - [Quality Assurance Measures](#quality-assurance-measures) + - [Running Tests](#running-tests) + - [Executing Tests from an R Interactive Session](#executing-tests-from-an-r-interactive-session) + - [Executing Tests from the Command Line](#executing-tests-from-the-command-line) + - [Running Tests with Docker Compose](#running-tests-with-docker-compose) + - [Code Coverage](#code-coverage) + - [Configuring Sentry](#configuring-sentry) + + +# Background + +## Purpose and Scope for Clinical Trial Randomization + +Randomization is the gold standard for conducting clinical trials and a fundamental aspect of clinical trials, in studies comparing two or more arms. In most cases randomization is a desirable technique that will ensure that patients are randomly allocated to defined groups. This is essential for maintaining the integrity of the trial and ensuring that the results are reliable, and blinding of research personnel. However, there are situations where it is desirable for studies to balance patients in terms of numbers in each group or, in addition, to achieve balance with respect to other relevant factors, such as sex or diabetes type. Adequate selection of randomization methods allows the intended randomization goals to be realized. + +**Unbiased** compared to standard and most commonly used randomization methods, e.g. the simple method or the block method, apart from these methods, additionally offers enhanced features of more flexible adaptive methods, which are based on current information about the allocation of patients in the trial. Compared to, for example, block randomization, adaptive randomization not only ensures relatively equal allocation to patient groups, but also allows the groups to be balanced on the basis of certain important covariates, which is its key advantage. This randomization requires predefined criteria, such as the probability with which a given patient will be assigned to a group based on minimizing the total imbalance, or weights that can be assigned personally for each individual covariate. Its advanced algorithmic approach sets it apart from others by minimizing selection bias and improving the overall efficiency of the randomization process in clinical trials. + +**Unbiased** allows the use of simple, block and adaptive minimization randomization methods relevant to the conduct of clinical trials, so package caters to the needs of clinical trial randomization. +... + +To find out more on differences in randomization methods, read our vignette on [Comparative Analysis of Randomization Methods](vignettes/articles/minimization_randomization_comparison.Rmd). + +## Comparison with other solutions + +There are many packages that perform specific randomization methods in R. 
+## Comparison with other solutions
+
+There are many packages that perform specific randomization methods in R. Most of them are designed for stratified randomization and permuted blocks, such as [blockrand](https://CRAN.R-project.org/package=blockrand) and [randomizeR](https://CRAN.R-project.org/package=randomizeR). Some of them also implement minimization randomization, e.g. [randPack](https://bioconductor.org/packages/randPack/) or [Minirand](https://CRAN.R-project.org/package=Minirand).
+
+Our unique contribution to the landscape is the integration of a comprehensive API and a commitment to rigorous testing. This dual focus ensures that **unbiased** not only supports the practical needs of clinical trials, but also aligns with the technical requirements of modern clinical research environments. By prioritizing these aspects, **unbiased** addresses a critical gap in the market: the need for an eCRF-compatible randomization solution that is both dependable and easily integrated into existing workflows. This, together with the implementation of minimization techniques, sets **unbiased** apart as a novel, comprehensive tool.
+
+# Quickstart Guide
+
+Initiating your work with **unbiased** involves simple setup steps. Whether you're integrating it into your R environment or deploying its API, we aim to equip you with a reliable tool that enhances the integrity and efficiency of your clinical trials.
+
+## Quick Setup with Docker
+
+The most straightforward way to deploy **unbiased** is through our Docker images. This ensures that you can get **unbiased** up and running with minimal setup, regardless of your local environment. To use **unbiased**, pull the latest Docker image:
+
+```sh
+docker pull ghcr.io/ttscience/unbiased
+```
+
+To run **unbiased** with Docker, ensure that the necessary environment variables are set (see [API Configuration](#api-configuration) below), then start the container:
+
+```sh
+docker run -e POSTGRES_DB=mydb -e POSTGRES_USER=myuser -e POSTGRES_PASSWORD=mypassword -e UNBIASED_PORT=3838 ghcr.io/ttscience/unbiased
+```
+
+This command starts the **unbiased** API, making it accessible on the specified port. It's crucial to have your PostgreSQL database ready, as **unbiased** will automatically configure the necessary database structures upon startup.
+
+## API Configuration
+
+The **unbiased** API server can be configured using environment variables. The following environment variables need to be set for the server to start (an example of providing them from an R session is shown after the list):
+
+- `POSTGRES_DB`: The name of the PostgreSQL database to connect to.
+- `POSTGRES_HOST`: The host of the PostgreSQL database. This could be a hostname, such as `localhost` or `database.example.com`, or an IP address.
+- `POSTGRES_PORT`: The port on which the PostgreSQL database is listening. Defaults to `5432` if not provided.
+- `POSTGRES_USER`: The username for authentication with the PostgreSQL database.
+- `POSTGRES_PASSWORD`: The password for authentication with the PostgreSQL database.
+- `UNBIASED_HOST`: The host on which the API will run. Defaults to `0.0.0.0` if not provided.
+- `UNBIASED_PORT`: The port on which the API will listen. Defaults to `3838` if not provided.
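+When the API is launched from an R session (see the next section), the same variables can be supplied with `Sys.setenv()` before starting the server. This is only a sketch; the values below are placeholders, so substitute your own connection details:
+
+```R
+# Placeholder connection settings -- adjust to your environment
+Sys.setenv(
+  POSTGRES_DB       = "postgres",
+  POSTGRES_HOST     = "localhost",
+  POSTGRES_PORT     = "5432",
+  POSTGRES_USER     = "postgres",
+  POSTGRES_PASSWORD = "postgres",
+  UNBIASED_PORT     = "3838"
+)
+
+# These settings are read at startup, e.g. when calling
+# unbiased::run_unbiased() as described below.
+```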
+## Alternative Installation Method
+
+For those preferring to work directly within the R environment, the **unbiased** package offers an alternative installation method via GitHub. This approach allows users to easily integrate **unbiased** into their R projects. To proceed with this method, use the `devtools` package to install **unbiased** with the following command:
+
+```R
+devtools::install_github("ttscience/unbiased")
+```
+
+Following the package installation, the **unbiased** API can be launched within R. Simply invoke the `run_unbiased()` function to start the API:
+
+```R
+unbiased::run_unbiased()
+```
+
+This initiates the API server, by default, on your local machine (http://localhost:3838), making it accessible for interaction through various HTTP clients, including curl, Postman, or R's `httr2` package.
+
+
+# Getting started with **unbiased**
+
+The **unbiased** package offers functions for randomizing participants in clinical trials, ensuring a fair and transparent process.
+
+Complete documentation of the implemented methods and examples of how to use them are available on our GitHub Pages, providing all the information you need to integrate **unbiased** into your trial management workflow. Below, we present the basic steps for using **unbiased** through the API.
+
+## API Endpoints
+
+The **unbiased** API is designed to facilitate clinical trial management through a set of endpoints:
+
+- **Study Management**: Create and configure new studies, including specifying randomization parameters and treatment arms.
+- **Participant Randomization**: Dynamically randomize participants to treatment groups based on the study's configuration and existing participant data.
+
+
+### Study Creation
+
+To initialize a study using Pocock's minimisation method, use the `POST /study/minimisation_pocock` endpoint. The required JSON payload should detail the study, including treatment groups, allocation ratios, and covariates. The example below uses the `httr2` package and assumes the API is running locally on the default port.
+
+```R
+library(httr2)
+
+api_url <- "http://localhost:3838"
+
+# Initialize a study with Pocock's minimisation method
+response <- request(api_url) |>
+  req_url_path("study", "minimisation_pocock") |>
+  req_method("POST") |>
+  req_body_json(
+    data = list(
+      identifier = "My_study_1",
+      name = "Study 1",
+      method = "var",
+      p = 0.85,
+      arms = list(
+        "placebo" = 1,
+        "treatment" = 1
+      ),
+      covariates = list(
+        sex = list(
+          weight = 1,
+          levels = c("female", "male")
+        ),
+        age = list(
+          weight = 1,
+          levels = c("up to 50", "51 or more")
+        )
+      )
+    )
+  ) |>
+  req_perform()
+```
+
+This call sets up the study and returns an ID for accessing further study-related endpoints.
+
+### Patient Randomization
+
+The `POST /study/{study_id}/patient` endpoint assigns a new patient to a treatment group, requiring patient details and covariate information in the JSON payload.
+
+```R
+# Randomize a new patient, using the study ID returned by the previous call
+response <- request(api_url) |>
+  req_url_path("study", my_study_id, "patient") |>
+  req_method("POST") |>
+  req_body_json(
+    data = list(
+      current_state =
+        tibble::tibble(
+          "sex" = c("female"),
+          "age" = c("up to 50"),
+          "arm" = c("")
+        )
+    )
+  ) |>
+  req_perform()
+```
+
+This endpoint determines the patient's treatment group.
+
+# Technical Implementation
+
+## Running Tests
+
+Unbiased provides an extensive collection of tests to ensure correct functionality.
+
+### Executing Tests from an R Interactive Session
+
+To execute tests using an interactive R session, run the following commands:
+
+```R
+devtools::load_all()
+testthat::test_package("unbiased")
+```
+
+Make sure that the `devtools` package is installed in your environment.
+
+Ensure that the necessary database connection environment variables are set before running these tests. You can set environment variables using methods such as `Sys.setenv`.
+
+Running these tests will start the Unbiased API on a random port.
+
+### Executing Tests from the Command Line
+
+Use the helper script `run_tests.sh` to execute tests from the command line. Remember to set the database connection environment variables before running the tests.
+
+### Running Tests with Docker Compose
+
+Docker Compose can be used to build the Unbiased Docker image and execute all tests.
This can be done using the provided `docker-compose.test.yml` file. This method ensures a consistent testing environment and simplifies the setup process. + +```bash +docker compose -f docker-compose.test.yml build +docker compose -f docker-compose.test.yml run tests +``` + +### Code Coverage + +Unbiased supports code coverage analysis through the `covr` package. This allows you to measure the effectiveness of your tests by showing which parts of your R code in the `R` directory are actually being tested. + +To calculate code coverage, you will need to install the `covr` package. Once installed, you can use the following methods: + +- `covr::report()`: This method runs all tests and generates a detailed coverage report in HTML format. +- `covr::package_coverage()`: This method provides a simpler, text-based code coverage report. + +Alternatively, you can use the provided `run_tests_with_coverage.sh` script to run Unbiased tests with code coverage. + +### Configuring Sentry +The Unbiased server offers robust error reporting capabilities through the integration of the Sentry service. To activate Sentry, simply set the `SENTRY_DSN` environment variable. Additionally, you have the flexibility to customize the setup further by configuring the following environment variables: + +* `SENTRY_ENVIRONMENT` This is used to set the environment (e.g., "production", "staging", "development"). If not set, the environment defaults to "development". + +* `SENTRY_RELEASE` This is used to set the release in Sentry. If not set, the release defaults to "unspecified". diff --git a/_pkgdown.yml b/_pkgdown.yml new file mode 100644 index 0000000..c4c0d89 --- /dev/null +++ b/_pkgdown.yml @@ -0,0 +1,4 @@ +url: https://ttscience.github.io/unbiased/ +template: + bootstrap: 5 + diff --git a/api/meta.R b/api/meta.R deleted file mode 100644 index 87f6820..0000000 --- a/api/meta.R +++ /dev/null @@ -1,10 +0,0 @@ -#* Github commit SHA -#* -#* Each release of the API is based on some Github commit. This endpoint allows -#* the user to easily check the SHA of the deployed API version. -#* -#* @get /sha -#* @serializer unboxedJSON -function() { - Sys.getenv("GITHUB_SHA", unset = "") -} diff --git a/api/plumber.R b/api/plumber.R deleted file mode 100644 index c798871..0000000 --- a/api/plumber.R +++ /dev/null @@ -1,9 +0,0 @@ -#* @plumber -function(api) { - rand_simple <- plumber::pr("randomize-simple.R") - meta <- plumber::pr("meta.R") - - api |> - plumber::pr_mount("/simple", rand_simple) |> - plumber::pr_mount("/meta", meta) -} diff --git a/api/randomize-simple.R b/api/randomize-simple.R deleted file mode 100644 index 9406713..0000000 --- a/api/randomize-simple.R +++ /dev/null @@ -1,11 +0,0 @@ -#* Return hello world -#* -#* @get /hello -#* @serializer unboxedJSON -function() { - call_hello_world() -} - -call_hello_world <- function() { - "Hello TTSI!" 
-} diff --git a/autoreload.sh b/autoreload.sh new file mode 100755 index 0000000..a7ac3de --- /dev/null +++ b/autoreload.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +set -e + +COMMAND=$1 + +echo "Running $COMMAND" + +watchmedo auto-restart \ + --patterns="*.R;*.txt" \ + --ignore-patterns="renv" \ + --recursive \ + --directory="./R" \ + --directory="./inst" \ + --directory="./tests" \ + --debounce-interval 1 \ + --no-restart-on-command-exit \ + "$@" \ No newline at end of file diff --git a/autoreload_polling.sh b/autoreload_polling.sh new file mode 100644 index 0000000..a0b9b3d --- /dev/null +++ b/autoreload_polling.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +set -e + +COMMAND=$1 + +echo "Running $COMMAND" + +watchmedo auto-restart \ + --patterns="*.R;*.txt" \ + --ignore-patterns="renv" \ + --recursive \ + --directory="./R" \ + --directory="./inst" \ + --directory="./tests" \ + --debounce-interval 1 \ + --debug-force-polling \ + -v \ + --no-restart-on-command-exit \ + "$@" \ No newline at end of file diff --git a/docker-compose.test.yaml b/docker-compose.test.yaml deleted file mode 100644 index df8aeb5..0000000 --- a/docker-compose.test.yaml +++ /dev/null @@ -1,24 +0,0 @@ -version: "3.9" -services: - api: - image: unbiased - container_name: unbiased_api - networks: - - test_net - tests: - image: unbiased - container_name: unbiased_tests - depends_on: - - api - environment: - - CI=true - networks: - - test_net - volumes: - - type: bind - source: ./tests - target: /src/unbiased/tests - command: Rscript tests/testthat.R - -networks: - test_net: diff --git a/docker-compose.test.yml b/docker-compose.test.yml new file mode 100644 index 0000000..a44b094 --- /dev/null +++ b/docker-compose.test.yml @@ -0,0 +1,19 @@ +version: "3.9" +services: + postgres: + image: ghcr.io/ttscience/postgres-temporal-tables/postgres-temporal-tables:latest + environment: + - POSTGRES_PASSWORD=postgres + tests: + build: + context: . 
+ dockerfile: Dockerfile + depends_on: + - postgres + environment: + - POSTGRES_DB=postgres + - POSTGRES_HOST=postgres + - POSTGRES_PORT=5432 + - POSTGRES_USER=postgres + - POSTGRES_PASSWORD=postgres + command: R -e "testthat::test_package('unbiased')" diff --git a/inst/db/migrations/000001_initialize_temporal_tables_extension.down.sql b/inst/db/migrations/000001_initialize_temporal_tables_extension.down.sql new file mode 100644 index 0000000..a7ff547 --- /dev/null +++ b/inst/db/migrations/000001_initialize_temporal_tables_extension.down.sql @@ -0,0 +1 @@ +DROP EXTENSION temporal_tables; diff --git a/inst/db/migrations/000001_initialize_temporal_tables_extension.up.sql b/inst/db/migrations/000001_initialize_temporal_tables_extension.up.sql new file mode 100644 index 0000000..0f70cfe --- /dev/null +++ b/inst/db/migrations/000001_initialize_temporal_tables_extension.up.sql @@ -0,0 +1 @@ +CREATE EXTENSION temporal_tables; diff --git a/inst/db/migrations/20240129082653_create_tables.down.sql b/inst/db/migrations/20240129082653_create_tables.down.sql new file mode 100644 index 0000000..18deffd --- /dev/null +++ b/inst/db/migrations/20240129082653_create_tables.down.sql @@ -0,0 +1,8 @@ +DROP TABLE patient_stratum; +DROP TABLE patient; +DROP TABLE numeric_constraint; +DROP TABLE factor_constraint; +DROP TABLE stratum_level; +DROP TABLE stratum; +DROP TABLE arm; +DROP TABLE study; \ No newline at end of file diff --git a/inst/db/migrations/20240129082653_create_tables.up.sql b/inst/db/migrations/20240129082653_create_tables.up.sql new file mode 100644 index 0000000..3ef3774 --- /dev/null +++ b/inst/db/migrations/20240129082653_create_tables.up.sql @@ -0,0 +1,174 @@ +CREATE TABLE study ( + id SERIAL PRIMARY KEY, + identifier VARCHAR(12) NOT NULL, + name VARCHAR(255) NOT NULL, + method VARCHAR(255) NOT NULL, + parameters JSONB, + timestamp TIMESTAMPTZ NOT NULL DEFAULT now(), + sys_period TSTZRANGE NOT NULL +); + +COMMENT ON TABLE study IS 'Stores information about various studies conducted.'; +COMMENT ON COLUMN study.id IS 'An auto-incrementing primary key uniquely identifying each study.'; +COMMENT ON COLUMN study.identifier IS 'A unique, short textual identifier for the study (max 12 characters).'; +COMMENT ON COLUMN study.name IS 'Provides the full name or title of the study.'; +COMMENT ON COLUMN study.method IS 'A randomization method name.'; +COMMENT ON COLUMN study.parameters IS 'JSONB column to store parameters related to the study.'; +COMMENT ON COLUMN study.timestamp IS 'Timestamp of when the record was created, defaults to current time.'; +COMMENT ON COLUMN study.sys_period IS 'TSTZRANGE type used for temporal versioning to track the validity period of each record.'; + +CREATE TABLE arm ( + id SERIAL PRIMARY KEY, + study_id INT NOT NULL, + name VARCHAR(255) NOT NULL, + ratio INT NOT NULL DEFAULT 1, + sys_period TSTZRANGE NOT NULL, + CONSTRAINT arm_study + FOREIGN KEY (study_id) + REFERENCES study (id) ON DELETE CASCADE, + CONSTRAINT uc_arm_study + UNIQUE (id, study_id), + CONSTRAINT ratio_positive + CHECK (ratio > 0) +); + +COMMENT ON TABLE arm IS 'Represents the treatment arms within each study.'; +COMMENT ON COLUMN arm.id IS 'An auto-incrementing primary key that uniquely identifies each arm.'; +COMMENT ON COLUMN arm.study_id IS 'A foreign key that links each arm to its corresponding study.'; +COMMENT ON COLUMN arm.name IS 'Provides a descriptive name for the treatment arm.'; +COMMENT ON COLUMN arm.ratio IS 'Specifies the proportion of patients allocated to this arm. 
It defaults to 1 and must always be positive.'; +COMMENT ON COLUMN arm.sys_period IS 'TSTZRANGE type used for temporal versioning to track the validity period of each record.'; + +CREATE TABLE stratum ( + id SERIAL PRIMARY KEY, + study_id INT NOT NULL, + name VARCHAR(255) NOT NULL, + value_type VARCHAR(12), + sys_period TSTZRANGE NOT NULL, + CONSTRAINT fk_study + FOREIGN KEY (study_id) + REFERENCES study (id) ON DELETE CASCADE, + CONSTRAINT chk_value_type + CHECK (value_type IN ('factor', 'numeric')) +); + +COMMENT ON TABLE stratum IS 'Defines the strata for patient categorization within each study.'; + +COMMENT ON COLUMN stratum.id IS 'An auto-incrementing primary key that uniquely identifies each stratum.'; +COMMENT ON COLUMN stratum.study_id IS 'A foreign key that links the stratum to a specific study.'; +COMMENT ON COLUMN stratum.name IS 'Provides a descriptive name for the stratum, such as a particular demographic or clinical characteristic.'; +COMMENT ON COLUMN stratum.value_type IS 'Indicates the type of value the stratum represents, limited to two types: ''factor'' or ''numeric''. ''factor'' represents categorical data, while ''numeric'' represents numerical data. This distinction is crucial as it informs the data validation logic applied in the system.'; +COMMENT ON COLUMN stratum.sys_period IS 'TSTZRANGE type used for temporal versioning to track the validity period of each record.'; + +CREATE TABLE stratum_level ( + stratum_id INT NOT NULL, + level VARCHAR(255) NOT NULL, + CONSTRAINT fk_stratum_level + FOREIGN KEY (stratum_id) + REFERENCES stratum (id) ON DELETE CASCADE, + CONSTRAINT uc_stratum_level + UNIQUE (stratum_id, level) +); +COMMENT ON TABLE stratum_level IS 'Keeps allowed stratum factor levels.'; + +COMMENT ON COLUMN stratum_level.stratum_id IS 'A foreign key that links the stratum level to a specific stratum.'; +COMMENT ON COLUMN stratum_level.level IS 'Level label, has to be unique within stratum.'; + +CREATE TABLE factor_constraint ( + stratum_id INT NOT NULL, + value VARCHAR(255) NOT NULL, + sys_period TSTZRANGE NOT NULL, + CONSTRAINT factor_stratum + FOREIGN KEY (stratum_id) + REFERENCES stratum (id) ON DELETE CASCADE, + CONSTRAINT uc_stratum_value + UNIQUE (stratum_id, value) +); + +COMMENT ON TABLE factor_constraint IS 'Defines constraints for strata of the ''factor'' type in studies. This table stores allowable values for each factor stratum, ensuring data consistency and integrity.'; + +COMMENT ON COLUMN factor_constraint.stratum_id IS 'A foreign key that links the constraint to a specific stratum in the ''stratum'' table.'; +COMMENT ON COLUMN factor_constraint.value IS 'Represents the specific allowable value for the factor stratum. This could be a categorical label like ''male'' or ''female'' for a gender stratum, for example.'; +COMMENT ON COLUMN factor_constraint.sys_period IS 'TSTZRANGE type used for temporal versioning to track the validity period of each record.'; + +CREATE TABLE numeric_constraint ( + stratum_id INT NOT NULL, + min_value FLOAT, + max_value FLOAT, + sys_period TSTZRANGE NOT NULL, + CONSTRAINT numeric_stratum + FOREIGN KEY (stratum_id) + REFERENCES stratum (id) ON DELETE CASCADE, + CONSTRAINT uc_stratum + UNIQUE (stratum_id), + CONSTRAINT chk_min_max + -- NULL is ok in checks, no need to test for it + CHECK (min_value <= max_value) +); + +COMMENT ON TABLE numeric_constraint IS 'Specifies constraints for strata of the ''numeric'' type in studies. 
This table defines the permissible range (minimum and maximum values) for each numeric stratum.'; + +COMMENT ON COLUMN numeric_constraint.stratum_id IS 'A foreign key that links the constraint to a specific numeric stratum in the ''stratum'' table.'; +COMMENT ON COLUMN numeric_constraint.min_value IS 'Defines the minimum allowable value for the stratum''s numeric values. Can be NULL, indicating that there is no lower bound.'; +COMMENT ON COLUMN numeric_constraint.max_value IS 'Defines the maximum allowable value for the stratum''s numeric values. Can be NULL, indicating that there is no upper bound.'; +COMMENT ON COLUMN numeric_constraint.sys_period IS 'TSTZRANGE type used for temporal versioning to track the validity period of each record.'; + +CREATE TABLE patient ( + id SERIAL PRIMARY KEY, + study_id INT NOT NULL, + arm_id INT, + used BOOLEAN NOT NULL DEFAULT false, + -- timestamp TIMESTAMPTZ NOT NULL DEFAULT now(), + sys_period TSTZRANGE NOT NULL, + CONSTRAINT patient_arm_study + FOREIGN KEY (arm_id, study_id) + REFERENCES arm (id, study_id) ON DELETE CASCADE, + CONSTRAINT used_with_arm + CHECK (NOT used OR arm_id IS NOT NULL) +); + + +COMMENT ON TABLE patient IS 'Represents individual patients participating in the studies.'; +COMMENT ON COLUMN patient.id IS 'An auto-incrementing primary key that uniquely identifies each patient.'; +COMMENT ON COLUMN patient.study_id IS 'A foreign key linking the patient to a specific study.'; +COMMENT ON COLUMN patient.arm_id IS 'An optional foreign key that links the patient to a specific treatment arm within the study.'; +COMMENT ON COLUMN patient.used IS 'A boolean flag indicating the state of the patient in the randomization process.'; +COMMENT ON COLUMN patient.sys_period IS 'Type TSTZRANGE, used for temporal versioning to track the validity period of each record.'; +COMMENT ON CONSTRAINT patient_arm_study ON patient IS 'Ensures referential integrity between patients, studies, and arms. It also cascades deletions to maintain consistency when a study or arm is deleted.'; +COMMENT ON CONSTRAINT used_with_arm ON patient IS 'Ensures logical consistency by allowing ''used'' to be true only if the patient is assigned to an arm (i.e., ''arm_id'' is not NULL). 
This prevents scenarios where a patient is marked as used but not assigned to any treatment arm.'; + + +CREATE TABLE patient_stratum ( + patient_id INT NOT NULL, + stratum_id INT NOT NULL, + fct_value VARCHAR(255), + num_value FLOAT, + sys_period TSTZRANGE NOT NULL, + CONSTRAINT fk_patient + FOREIGN KEY (patient_id) + REFERENCES patient (id) ON DELETE CASCADE, + CONSTRAINT fk_stratum_2 + FOREIGN KEY (stratum_id) + REFERENCES stratum (id) ON DELETE CASCADE, + CONSTRAINT chk_value_exists + -- Either factor or numeric value must be given + CHECK (fct_value IS NOT NULL OR num_value IS NOT NULL), + CONSTRAINT chk_one_value_only + -- Can't give both factor and numeric value + CHECK (fct_value IS NULL OR num_value IS NULL), + CONSTRAINT uc_patient_stratum + UNIQUE (patient_id, stratum_id) +); + + +COMMENT ON TABLE patient_stratum IS 'Associates patients with specific strata and records the corresponding stratum values.'; +COMMENT ON COLUMN patient_stratum.patient_id IS 'A foreign key that links to the ''patient'' table, identifying the patient.'; +COMMENT ON COLUMN patient_stratum.stratum_id IS 'A foreign key that links to the ''stratum'' table, identifying the stratum to which the patient belongs.'; +COMMENT ON COLUMN patient_stratum.fct_value IS 'Stores the categorical (factor) value for the patient in the corresponding stratum, if applicable.'; +COMMENT ON COLUMN patient_stratum.num_value IS 'Stores the numerical value for the patient in the corresponding stratum, if applicable.'; +COMMENT ON COLUMN patient_stratum.sys_period IS 'Type TSTZRANGE, used for temporal versioning to track the validity period of each record.'; +COMMENT ON CONSTRAINT fk_patient ON patient_stratum IS 'Links each patient-stratum pairing to the respective tables.'; +COMMENT ON CONSTRAINT fk_stratum_2 ON patient_stratum IS 'Links each patient-stratum pairing to the respective tables.'; +COMMENT ON CONSTRAINT chk_value_exists ON patient_stratum IS 'Ensures that either a factor or numeric value is provided for each record, aligning with the nature of the stratum.'; +COMMENT ON CONSTRAINT chk_one_value_only ON patient_stratum IS 'Ensures that each record has either a factor or a numeric value, but not both, maintaining the integrity of the data by ensuring it matches the stratum type (factor or numeric).'; +COMMENT ON CONSTRAINT uc_patient_stratum ON patient_stratum IS 'Ensures that each patient-stratum pairing is unique.'; diff --git a/inst/db/migrations/20240129082842_main_data_validation.down.sql b/inst/db/migrations/20240129082842_main_data_validation.down.sql new file mode 100644 index 0000000..4ffef4b --- /dev/null +++ b/inst/db/migrations/20240129082842_main_data_validation.down.sql @@ -0,0 +1,14 @@ +DROP TRIGGER patient_num_constraint ON patient_stratum; +DROP FUNCTION check_num_patient(); + +DROP TRIGGER patient_fct_constraint ON patient_stratum; +DROP FUNCTION check_fct_patient(); + +DROP TRIGGER patient_stratum_study_constraint ON patient_stratum; +DROP FUNCTION check_patient_stratum_study(); + +DROP TRIGGER stratum_num_constraint ON numeric_constraint; +DROP FUNCTION check_num_stratum(); + +DROP TRIGGER stratum_fct_constraint ON factor_constraint; +DROP FUNCTION check_fct_stratum(); \ No newline at end of file diff --git a/inst/db/migrations/20240129082842_main_data_validation.up.sql b/inst/db/migrations/20240129082842_main_data_validation.up.sql new file mode 100644 index 0000000..7d89f55 --- /dev/null +++ b/inst/db/migrations/20240129082842_main_data_validation.up.sql @@ -0,0 +1,134 @@ +-- Stratum constraint 
checks + +CREATE FUNCTION check_fct_stratum() +RETURNS trigger AS $$ +BEGIN + IF NOT EXISTS ( + SELECT 1 FROM stratum + -- Checks that column value is correct + WHERE id = NEW.stratum_id AND value_type = 'factor' + ) THEN + RAISE EXCEPTION 'Can''t set factor constraint for non-factor stratum.'; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER stratum_fct_constraint +BEFORE INSERT ON factor_constraint +FOR EACH ROW +EXECUTE PROCEDURE check_fct_stratum(); + + +CREATE FUNCTION check_num_stratum() +RETURNS trigger AS $$ +BEGIN + IF NOT EXISTS ( + SELECT 1 FROM stratum + -- Checks that column value is correct + WHERE id = NEW.stratum_id AND value_type = 'numeric' + ) THEN + RAISE EXCEPTION 'Can''t set numeric constraint for non-numeric stratum.'; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER stratum_num_constraint +BEFORE INSERT ON numeric_constraint +FOR EACH ROW +EXECUTE PROCEDURE check_num_stratum(); + +-- Patient stratum value checks + +-- Ensure that patients and strata are assigned to the same study. +CREATE FUNCTION check_patient_stratum_study() +RETURNS trigger AS $$ +BEGIN + DECLARE + patient_study INT := ( + SELECT study_id FROM patient + WHERE id = NEW.patient_id + ); + stratum_study INT := ( + SELECT study_id FROM stratum + WHERE id = NEW.stratum_id + ); + BEGIN + IF (patient_study <> stratum_study) THEN + RAISE EXCEPTION 'Stratum and patient must be assigned to the same study.'; + END IF; + END; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER patient_stratum_study_constraint +BEFORE INSERT ON patient_stratum +FOR EACH ROW +EXECUTE PROCEDURE check_patient_stratum_study(); + +-- Validate and enforce factor stratum values. +CREATE FUNCTION check_fct_patient() +RETURNS trigger AS $$ +BEGIN + IF EXISTS ( + SELECT 1 FROM stratum + WHERE id = NEW.stratum_id AND value_type = 'factor' + ) THEN + IF (NEW.fct_value IS NULL) THEN + RAISE EXCEPTION 'Factor stratum requires a factor value.'; + END IF; + IF NOT EXISTS ( + SELECT 1 FROM factor_constraint + WHERE stratum_id = NEW.stratum_id AND value = NEW.fct_value + ) THEN + RAISE EXCEPTION 'Factor value not specified as allowed.'; + END IF; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER patient_fct_constraint +BEFORE INSERT ON patient_stratum +FOR EACH ROW +EXECUTE PROCEDURE check_fct_patient(); + +-- Validate and enforce numeric stratum values within specified constraints. 
+CREATE FUNCTION check_num_patient() +RETURNS trigger AS $$ +BEGIN + IF EXISTS ( + SELECT 1 FROM stratum + WHERE id = NEW.stratum_id AND value_type = 'numeric' + ) THEN + IF (NEW.num_value IS NULL) THEN + RAISE EXCEPTION 'Numeric stratum requires a numeric value.'; + END IF; + DECLARE + min_value FLOAT := ( + SELECT min_value FROM numeric_constraint + WHERE stratum_id = NEW.stratum_id + ); + max_value FLOAT := ( + SELECT max_value FROM numeric_constraint + WHERE stratum_id = NEW.stratum_id + ); + BEGIN + IF (min_value IS NOT NULL AND NEW.num_value < min_value) THEN + RAISE EXCEPTION 'New value is lower than minimum allowed value.'; + END IF; + IF (max_value IS NOT NULL AND NEW.num_value > max_value) THEN + RAISE EXCEPTION 'New value is greater than maximum allowed value.'; + END IF; + END; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER patient_num_constraint +BEFORE INSERT ON patient_stratum +FOR EACH ROW +EXECUTE PROCEDURE check_num_patient(); diff --git a/inst/db/migrations/20240129084925_versioning.down.sql b/inst/db/migrations/20240129084925_versioning.down.sql new file mode 100644 index 0000000..73a9d56 --- /dev/null +++ b/inst/db/migrations/20240129084925_versioning.down.sql @@ -0,0 +1,20 @@ +DROP TRIGGER patient_stratum_versioning ON patient_stratum; +DROP TABLE patient_stratum_history; + +DROP TRIGGER patient_versioning ON patient; +DROP TABLE patient_history; + +DROP TRIGGER num_constraint_versioning ON numeric_constraint; +DROP TABLE numeric_constraint_history; + +DROP TRIGGER fct_constraint_versioning ON factor_constraint; +DROP TABLE factor_constraint_history; + +DROP TRIGGER stratum_versioning ON stratum; +DROP TABLE stratum_history; + +DROP TRIGGER arm_versioning ON arm; +DROP TABLE arm_history; + +DROP TRIGGER study_versioning ON study; +DROP TABLE study_history; \ No newline at end of file diff --git a/inst/db/migrations/20240129084925_versioning.up.sql b/inst/db/migrations/20240129084925_versioning.up.sql new file mode 100644 index 0000000..9572597 --- /dev/null +++ b/inst/db/migrations/20240129084925_versioning.up.sql @@ -0,0 +1,48 @@ +CREATE TABLE study_history (LIKE study); + +CREATE TRIGGER study_versioning +BEFORE INSERT OR UPDATE OR DELETE ON study +FOR EACH ROW +EXECUTE PROCEDURE versioning('sys_period', 'study_history', true); + +CREATE TABLE arm_history (LIKE arm); + +CREATE TRIGGER arm_versioning +BEFORE INSERT OR UPDATE OR DELETE ON arm +FOR EACH ROW +EXECUTE PROCEDURE versioning('sys_period', 'arm_history', true); + +CREATE TABLE stratum_history (LIKE stratum); + +CREATE TRIGGER stratum_versioning +BEFORE INSERT OR UPDATE OR DELETE ON stratum +FOR EACH ROW +EXECUTE PROCEDURE versioning('sys_period', 'stratum_history', true); + +CREATE TABLE factor_constraint_history (LIKE factor_constraint); + +CREATE TRIGGER fct_constraint_versioning +BEFORE INSERT OR UPDATE OR DELETE ON factor_constraint +FOR EACH ROW +EXECUTE PROCEDURE versioning('sys_period', 'factor_constraint_history', true); + +CREATE TABLE numeric_constraint_history (LIKE numeric_constraint); + +CREATE TRIGGER num_constraint_versioning +BEFORE INSERT OR UPDATE OR DELETE ON numeric_constraint +FOR EACH ROW +EXECUTE PROCEDURE versioning('sys_period', 'numeric_constraint_history', true); + +CREATE TABLE patient_history (LIKE patient); + +CREATE TRIGGER patient_versioning +BEFORE INSERT OR UPDATE OR DELETE ON patient +FOR EACH ROW +EXECUTE PROCEDURE versioning('sys_period', 'patient_history', true); + +CREATE TABLE patient_stratum_history (LIKE patient_stratum); + +CREATE TRIGGER 
patient_stratum_versioning +BEFORE INSERT OR UPDATE OR DELETE ON patient_stratum +FOR EACH ROW +EXECUTE PROCEDURE versioning('sys_period', 'patient_stratum_history', true); diff --git a/inst/db/migrations/20240216102753_audit_trail.down.SQL b/inst/db/migrations/20240216102753_audit_trail.down.SQL new file mode 100644 index 0000000..4a15498 --- /dev/null +++ b/inst/db/migrations/20240216102753_audit_trail.down.SQL @@ -0,0 +1,2 @@ +DROP INDEX audit_log_study_id_idx; +DROP TABLE audit_log; diff --git a/inst/db/migrations/20240216102753_audit_trail.up.SQL b/inst/db/migrations/20240216102753_audit_trail.up.SQL new file mode 100644 index 0000000..c267f59 --- /dev/null +++ b/inst/db/migrations/20240216102753_audit_trail.up.SQL @@ -0,0 +1,17 @@ +CREATE TABLE audit_log ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL, + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, + event_type TEXT NOT NULL, + request_id UUID NOT NULL, + study_id integer, + endpoint_url TEXT NOT NULL, + request_method TEXT NOT NULL, + request_body JSONB, + response_code integer NOT NULL, + response_body JSONB, + CONSTRAINT audit_log_study_id_fk + FOREIGN KEY (study_id) + REFERENCES study (id) +); + +CREATE INDEX audit_log_study_id_idx ON audit_log (study_id); diff --git a/inst/db/migrations/20240304105844_add_ip_address_and_user_agent_to_audit_log.down.sql b/inst/db/migrations/20240304105844_add_ip_address_and_user_agent_to_audit_log.down.sql new file mode 100644 index 0000000..d4baee8 --- /dev/null +++ b/inst/db/migrations/20240304105844_add_ip_address_and_user_agent_to_audit_log.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE audit_log DROP COLUMN ip_address; +ALTER TABLE audit_log DROP COLUMN user_agent; \ No newline at end of file diff --git a/inst/db/migrations/20240304105844_add_ip_address_and_user_agent_to_audit_log.up.sql b/inst/db/migrations/20240304105844_add_ip_address_and_user_agent_to_audit_log.up.sql new file mode 100644 index 0000000..aa15654 --- /dev/null +++ b/inst/db/migrations/20240304105844_add_ip_address_and_user_agent_to_audit_log.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE audit_log ADD COLUMN ip_address VARCHAR(255); +ALTER TABLE audit_log ADD COLUMN user_agent TEXT; \ No newline at end of file diff --git a/inst/plumber/unbiased_api/meta.R b/inst/plumber/unbiased_api/meta.R new file mode 100644 index 0000000..ec157a3 --- /dev/null +++ b/inst/plumber/unbiased_api/meta.R @@ -0,0 +1,17 @@ +#* Github commit SHA +#* +#* Each release of the API is based on some Github commit. This endpoint allows +#* the user to easily check the SHA of the deployed API version. +#* +#* @tag other +#* @get /sha +#* @serializer unboxedJSON +unbiased:::wrap_endpoint(function(req, res) { + sha <- Sys.getenv("GITHUB_SHA", unset = "NULL") + if (sha == "NULL") { + res$status <- 404 + return(c(error = "SHA not found")) + } else { + return(sha) + } +}) diff --git a/inst/plumber/unbiased_api/plumber.R b/inst/plumber/unbiased_api/plumber.R new file mode 100644 index 0000000..4b1243b --- /dev/null +++ b/inst/plumber/unbiased_api/plumber.R @@ -0,0 +1,115 @@ +#* @apiTitle Unbiased +#* @apiDescription This API provides a diverse range of randomization +#* algorithms specifically designed for use in clinical trials. It supports +#* dynamic strategies such as the minimization method, as well as simpler +#* approaches including standard and block randomization. 
The main goal of +#* this API is to ensure seamless integration with electronic Case Report +#* Form (eCRF) systems, facilitating efficient patient allocation management +#* in clinical trials. +#* @apiContact list(name = "GitHub", +#* url = "https://ttscience.github.io/unbiased/") +#* @apiLicense list(name = "MIT", +#* url = "https://github.com/ttscience/unbiased/LICENSE.md") +#* @apiVersion 1.0.0 +#* @apiTag initialize Endpoints that initialize study with chosen +#* randomization method and parameters. +#* @apiTag randomize Endpoints that randomize individual patients after the +#* study was created. +#* @apiTag read Endpoints that read created records +#* @apiTag other Other endpoints (helpers etc.). +#* +#* @plumber +function(api) { + meta <- plumber::pr("meta.R") + study <- plumber::pr("study.R") + + meta |> + plumber::pr_set_error(unbiased:::default_error_handler) + + study |> + plumber::pr_set_error(unbiased:::default_error_handler) + + api |> + plumber::pr_set_error(unbiased:::default_error_handler) |> + unbiased:::setup_invalid_json_handler() + + api |> + plumber::pr_mount("/meta", meta) |> + plumber::pr_mount("/study", study) |> + unbiased:::setup_audit_trail(endpoints = list( + "^/study.*" + )) |> + plumber::pr_set_api_spec(function(spec) { + spec$ + paths$ + `/study/minimisation_pocock`$ + post$requestBody$ + content$`application/json`$schema$properties$ + arms$example <- list("placebo" = 1, "active" = 1) + spec$ + paths$ + `/study/minimisation_pocock`$ + post$requestBody$ + content$`application/json`$schema$properties$ + identifier$example <- "CSN" + spec$ + paths$ + `/study/minimisation_pocock`$ + post$requestBody$ + content$`application/json`$schema$properties$ + p$example <- 0.85 + spec$ + paths$`/study/minimisation_pocock`$ + post$requestBody$ + content$`application/json`$ + schema$properties$ + name$example <- "Clinical Study Name" + spec$ + paths$`/study/minimisation_pocock`$ + post$requestBody$ + content$`application/json`$ + schema$properties$ + method$example <- "range" + # example of how to define covariates in minimisation pocock + spec$ + paths$`/study/minimisation_pocock`$ + post$requestBody$ + content$`application/json`$ + schema$properties$ + covariates$example <- + list( + sex = list( + weight = 1, + levels = c("female", "male") + ), + weight = list( + weight = 1, + levels = c("up to 60kg", "61-80 kg", "81 kg or more") + ) + ) + spec$ + paths$`/study/{study_id}/patient`$ + post$requestBody$content$`application/json`$ + schema$properties$current_state$example <- + tibble::tibble( + "sex" = c("female", "male"), + "weight" = c("61-80 kg", "81 kg or more"), + "arm" = c("placebo", "") + ) + spec + }) +} + + +#* Log request data +#* +#* @filter logger +function(req) { + cat( + "[QUERY]", + req$REQUEST_METHOD, req$PATH_INFO, + "@", req$REMOTE_ADDR, "\n" + ) + + plumber::forward() +} diff --git a/inst/plumber/unbiased_api/study.R b/inst/plumber/unbiased_api/study.R new file mode 100644 index 0000000..bc5c3a3 --- /dev/null +++ b/inst/plumber/unbiased_api/study.R @@ -0,0 +1,110 @@ +#* Initialize a study with Pocock's minimisation randomization +#* +#* Set up a new study for randomization defining its parameters +#* +#* +#* @param identifier:object Study code, at most 12 characters. +#* @param name:object Full study name. +#* @param method:object Function used to compute within-arm variability, +#* must be one of: sd, var, range +#* @param p:object Proportion of randomness (0, 1) in the randomization vs +#* determinism (e.g. 
0.85 equals 85% deterministic) +#* @param arms:object Arm names (character) with their ratios (integer). +#* @param covariates:object Covariate names (character), allowed levels +#* (character) and covariate weights (double). +#* +#* @tag initialize +#* +#* @post /minimisation_pocock +#* @serializer unboxedJSON +#* +unbiased:::wrap_endpoint(function( + identifier, name, method, arms, covariates, p, req, res) { + return( + unbiased:::api__minimization_pocock( + identifier, name, method, arms, covariates, p, req, res + ) + ) +}) + +#* Randomize one patient +#* +#* +#* @param study_id:int Study identifier +#* @param current_state:object +#* +#* @tag randomize +#* @post //patient +#* @serializer unboxedJSON +#* + +unbiased:::wrap_endpoint(function(study_id, current_state, req, res) { + return( + unbiased:::api__randomize_patient(study_id, current_state, req, res) + ) +}) + + +#* Get study audit log +#* +#* Get the audit log for a study +#* +#* +#* @param study_id:int Study identifier +#* +#* @tag audit +#* @get //audit +#* @serializer unboxedJSON +#* +unbiased:::wrap_endpoint(function(study_id, req, res) { + return( + unbiased:::api_get_audit_log(study_id, req, res) + ) +}) + + +#* Get all available studies +#* +#* @return tibble with study_id, identifier, name and method +#* +#* @tag read +#* @get / +#* @serializer unboxedJSON +#* + +unbiased:::wrap_endpoint(function(req, res) { + return( + unbiased:::api_get_study(req, res) + ) +}) + +#* Get all records for chosen study +#* +#* @param study_id:int Study identifier +#* +#* @tag read +#* @get / +#* +#* @serializer unboxedJSON +#* + +unbiased:::wrap_endpoint(function(study_id, req, res) { + return( + unbiased:::api_get_study_records(study_id, req, res) + ) +}) + +#* Get randomization list +#* +#* @param study_id:int Study identifier +#* +#* @tag read +#* @get //randomization_list +#* @serializer unboxedJSON +#* + +unbiased:::wrap_endpoint(function(study_id, req, res) { + return( + unbiased:::api_get_rand_list(study_id, req, res) + ) +}) diff --git a/man/AuditLog.Rd b/man/AuditLog.Rd new file mode 100644 index 0000000..dd413d9 --- /dev/null +++ b/man/AuditLog.Rd @@ -0,0 +1,152 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/audit-trail.R +\name{AuditLog} +\alias{AuditLog} +\title{AuditLog Class} +\description{ +This class is used internally to store audit logs for each request. +} +\section{Methods}{ +\subsection{Public methods}{ +\itemize{ +\item \href{#method-AuditLog-new}{\code{AuditLog$new()}} +\item \href{#method-AuditLog-disable}{\code{AuditLog$disable()}} +\item \href{#method-AuditLog-is_enabled}{\code{AuditLog$is_enabled()}} +\item \href{#method-AuditLog-set_request_body}{\code{AuditLog$set_request_body()}} +\item \href{#method-AuditLog-set_response_body}{\code{AuditLog$set_response_body()}} +\item \href{#method-AuditLog-set_ip_address}{\code{AuditLog$set_ip_address()}} +\item \href{#method-AuditLog-set_user_agent}{\code{AuditLog$set_user_agent()}} +\item \href{#method-AuditLog-set_event_type}{\code{AuditLog$set_event_type()}} +\item \href{#method-AuditLog-set_study_id}{\code{AuditLog$set_study_id()}} +\item \href{#method-AuditLog-set_response_code}{\code{AuditLog$set_response_code()}} +\item \href{#method-AuditLog-validate_log}{\code{AuditLog$validate_log()}} +\item \href{#method-AuditLog-persist}{\code{AuditLog$persist()}} +\item \href{#method-AuditLog-clone}{\code{AuditLog$clone()}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-AuditLog-new}{}}} +\subsection{Method \code{new()}}{ +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{AuditLog$new(request_method, endpoint_url)}\if{html}{\out{
}} +} + +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-AuditLog-disable}{}}} +\subsection{Method \code{disable()}}{ +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{AuditLog$disable()}\if{html}{\out{
}} +} + +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-AuditLog-is_enabled}{}}} +\subsection{Method \code{is_enabled()}}{ +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{AuditLog$is_enabled()}\if{html}{\out{
}} +} + +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-AuditLog-set_request_body}{}}} +\subsection{Method \code{set_request_body()}}{ +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{AuditLog$set_request_body(request_body)}\if{html}{\out{
}} +} + +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-AuditLog-set_response_body}{}}} +\subsection{Method \code{set_response_body()}}{ +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{AuditLog$set_response_body(response_body)}\if{html}{\out{
}} +} + +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-AuditLog-set_ip_address}{}}} +\subsection{Method \code{set_ip_address()}}{ +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{AuditLog$set_ip_address(ip_address)}\if{html}{\out{
}} +} + +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-AuditLog-set_user_agent}{}}} +\subsection{Method \code{set_user_agent()}}{ +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{AuditLog$set_user_agent(user_agent)}\if{html}{\out{
}} +} + +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-AuditLog-set_event_type}{}}} +\subsection{Method \code{set_event_type()}}{ +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{AuditLog$set_event_type(event_type)}\if{html}{\out{
}} +} + +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-AuditLog-set_study_id}{}}} +\subsection{Method \code{set_study_id()}}{ +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{AuditLog$set_study_id(study_id)}\if{html}{\out{
}} +} + +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-AuditLog-set_response_code}{}}} +\subsection{Method \code{set_response_code()}}{ +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{AuditLog$set_response_code(response_code)}\if{html}{\out{
}} +} + +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-AuditLog-validate_log}{}}} +\subsection{Method \code{validate_log()}}{ +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{AuditLog$validate_log()}\if{html}{\out{
}} +} + +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-AuditLog-persist}{}}} +\subsection{Method \code{persist()}}{ +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{AuditLog$persist()}\if{html}{\out{
}} +} + +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-AuditLog-clone}{}}} +\subsection{Method \code{clone()}}{ +The objects of this class are cloneable with this method. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{AuditLog$clone(deep = FALSE)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{deep}}{Whether to make a deep clone.} +} +\if{html}{\out{
}} +} +} +} diff --git a/man/audit_log_set_event_type.Rd b/man/audit_log_set_event_type.Rd new file mode 100644 index 0000000..40d7f85 --- /dev/null +++ b/man/audit_log_set_event_type.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/audit-trail.R +\name{audit_log_set_event_type} +\alias{audit_log_set_event_type} +\title{Set Audit Log Event Type} +\usage{ +audit_log_set_event_type(event_type, req) +} +\arguments{ +\item{event_type}{The event type to be set for the audit log.} + +\item{req}{The request object, which should contain an audit log in its internal data.} +} +\value{ +Returns nothing as it modifies the audit log in-place. +} +\description{ +This function sets the event type for an audit log. It retrieves the audit log from the request's +internal data, and then calls the audit log's set_event_type method with the provided event type. +} diff --git a/man/audit_log_set_study_id.Rd b/man/audit_log_set_study_id.Rd new file mode 100644 index 0000000..6fe9076 --- /dev/null +++ b/man/audit_log_set_study_id.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/audit-trail.R +\name{audit_log_set_study_id} +\alias{audit_log_set_study_id} +\title{Set Audit Log Study ID} +\usage{ +audit_log_set_study_id(study_id, req) +} +\arguments{ +\item{study_id}{The study ID to be set for the audit log.} + +\item{req}{The request object, which should contain an audit log in its internal data.} +} +\value{ +Returns nothing as it modifies the audit log in-place. +} +\description{ +This function sets the study ID for an audit log. It retrieves the audit log from the request's +internal data, and then calls the audit log's set_study_id method with the provided study ID. +} diff --git a/man/compare_rows.Rd b/man/compare_rows.Rd new file mode 100644 index 0000000..da314a0 --- /dev/null +++ b/man/compare_rows.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/randomize-minimisation-pocock.R +\name{compare_rows} +\alias{compare_rows} +\title{Compare rows of two dataframes} +\usage{ +compare_rows(all_patients, new_patients) +} +\arguments{ +\item{all_patients}{data.frame with all patients} + +\item{new_patients}{data.frame with new patient} +} +\value{ +data.frame with columns as in all_patients and new_patients, +filled with TRUE if there is match in covariate and FALSE if not +} +\description{ +Takes dataframe all_patients (presumably with one row / patient) and +compares it to all rows of new_patients (presumably already randomized +patients) +} diff --git a/man/create_db_connection_pool.Rd b/man/create_db_connection_pool.Rd new file mode 100644 index 0000000..9a76532 --- /dev/null +++ b/man/create_db_connection_pool.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/db.R +\name{create_db_connection_pool} +\alias{create_db_connection_pool} +\title{Defines methods for interacting with the study in the database +Create a database connection pool} +\usage{ +create_db_connection_pool(...) +} +\value{ +A pool object representing the connection pool to the database. +} +\description{ +This function creates a connection pool to a PostgreSQL database. It uses +environment variables to get the necessary connection parameters. If the +connection fails, it will retry up to 5 times with a delay of 2 seconds +between each attempt. 
+} +\examples{ +\dontrun{ +pool <- create_db_connection_pool() +} +} diff --git a/man/randomize_minimisation_pocock.Rd b/man/randomize_minimisation_pocock.Rd new file mode 100644 index 0000000..d71f807 --- /dev/null +++ b/man/randomize_minimisation_pocock.Rd @@ -0,0 +1,126 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/randomize-minimisation-pocock.R +\name{randomize_minimisation_pocock} +\alias{randomize_minimisation_pocock} +\title{Patient Randomization Using Minimization Method} +\usage{ +randomize_minimisation_pocock( + arms, + current_state, + weights, + ratio, + method = "var", + p = 0.85 +) +} +\arguments{ +\item{arms}{\code{character()}\cr +Arm names.} + +\item{current_state}{\code{tibble()}\cr +table of covariates and current arm assignments in column \code{arm}, +last row contains the new patient with empty string for \code{arm}} + +\item{weights}{\code{numeric()}\cr +vector of positive weights, equal in length to number of covariates, +numbered after covariates, defaults to equal weights} + +\item{ratio}{\code{integer()}\cr +Vector of positive integers (0 is allowed), equal in length to number +of arms, named after arms, defaults to equal weight} + +\item{method}{\code{character()}\cr +Function used to compute within-arm variability, must be one of: +\code{sd}, \code{var}, \code{range}, defaults to \code{var}} + +\item{p}{\code{numeric()}\cr +single value, proportion of randomness (0, 1) in the randomization +vs determinism, defaults to 85\% deterministic} +} +\value{ +\code{character()}\cr +name of the arm assigned to the patient +} +\description{ +\loadmathjax +The \code{randomize_dynamic} function implements the dynamic randomization +algorithm using the minimization method proposed by Pocock (Pocock and Simon, +1975). It requires defining basic study parameters: the number of arms (K), +number of covariates (C), patient allocation ratios (\(a_{k}\)) +(where k = 1,2,
., K), weights for the covariates (\(w_{i}\)) +(where i = 1,2,
., C), and the maximum probability (p) of assigning a patient +to the group with the smallest total unbalance multiplied by +the respective weights (\(G_{k}\)). As the total unbalance for the first +patient is the same regardless of the assigned arm, this patient is randomly +allocated to a given arm. Subsequent patients are randomized based on the +calculation of the unbalance depending on the selected method: "range", +"var" (variance), or "sd" (standard deviation). In the case of two arms, +the "range" method is equivalent to the "sd" method. +} +\details{ +Initially, the algorithm creates a matrix of results comparing a newly +randomized patient with the current balance of patients based on the defined +covariates. In the next step, for each arm and specified covariate, +various scenarios of patient allocation are calculated. The existing results +(n) are updated with the new patient, and then, considering the ratio +coefficients, the results are divided by the specific allocation ratio. +Depending on the method, the total unbalance is then calculated, +taking into account the weights, and the number of covariates using one +of three methods (“sd”, “range”, “var”). +Based on the number of defined arms, the minimum value of (\(G_{k}\)) +(defined as the weighted sum of the level-based imbalance) selects the arm to +which the patient will be assigned with a predefined probability (p). The +probability that a patient will be assigned to any other arm will then be +equal (1-p)/(K-1) +for each of the remaining arms. +} +\note{ +This function's implementation is a refactored adaptation +of the codebase from the 'Minirand' package. +} +\examples{ +n_at_the_moment <- 10 +arms <- c("control", "active low", "active high") +sex <- sample(c("F", "M"), + n_at_the_moment + 1, + replace = TRUE, + prob = c(0.4, 0.6) +) +diabetes <- + sample(c("diabetes", "no diabetes"), + n_at_the_moment + 1, + replace = TRUE, + prob = c(0.2, 0.8) + ) +arm <- + sample(arms, + n_at_the_moment, + replace = TRUE, + prob = c(0.4, 0.4, 0.2) + ) |> + c("") +covar_df <- tibble::tibble(sex, diabetes, arm) +covar_df + +randomize_minimisation_pocock(arms = arms, current_state = covar_df) +randomize_minimisation_pocock( + arms = arms, current_state = covar_df, + ratio = c( + "control" = 1, + "active low" = 2, + "active high" = 2 + ), + weights = c( + "sex" = 0.5, + "diabetes" = 1 + ) +) + +} +\references{ +Pocock, S. J., & Simon, R. (1975). Minimization: A new method +of assigning patients to treatment and control groups in clinical trials. + +Minirand Package: Man Jin, Adam Polis, Jonathan Hartzel. +(https://CRAN.R-project.org/package=Minirand) +} diff --git a/man/randomize_simple.Rd b/man/randomize_simple.Rd new file mode 100644 index 0000000..95599eb --- /dev/null +++ b/man/randomize_simple.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/randomize-simple.R +\name{randomize_simple} +\alias{randomize_simple} +\title{Simple randomization} +\usage{ +randomize_simple(arms, ratio) +} +\arguments{ +\item{arms}{\code{character()}\cr +Arm names.} + +\item{ratio}{\code{integer()}\cr +Vector of positive integers (0 is allowed), equal in length to number +of arms, named after arms, defaults to equal weight} +} +\value{ +Selected arm assignment. +} +\description{ +Randomly assigns a patient to one of the arms according to specified ratios, +regardless of already performed assignments. 
+} +\examples{ +randomize_simple(c("active", "placebo"), c("active" = 2, "placebo" = 1)) + +} diff --git a/man/run_unbiased.Rd b/man/run_unbiased.Rd new file mode 100644 index 0000000..a7f87f4 --- /dev/null +++ b/man/run_unbiased.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/run-api.R +\name{run_unbiased} +\alias{run_unbiased} +\title{Run API} +\usage{ +run_unbiased() +} +\arguments{ +\item{host}{\code{character(1)}\cr +Host URL.} + +\item{port}{\code{integer(1)}\cr +Port to serve API under.} +} +\value{ +Function called to serve the API in the caller thread. +} +\description{ +Starts \pkg{unbiased} API. +} diff --git a/man/setup_audit_trail.Rd b/man/setup_audit_trail.Rd new file mode 100644 index 0000000..129039f --- /dev/null +++ b/man/setup_audit_trail.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/audit-trail.R +\name{setup_audit_trail} +\alias{setup_audit_trail} +\title{Set up audit trail} +\usage{ +setup_audit_trail(pr, endpoints = list()) +} +\arguments{ +\item{pr}{A plumber router for which the audit trail is to be set up.} + +\item{endpoints}{A list of regex patterns for which the audit trail should be enabled.} +} +\value{ +Returns the updated plumber router with the audit trail hooks. +} +\description{ +This function sets up an audit trail for a given process. It uses plumber's hooks to log +information before routing (preroute) and after serializing the response (postserialize). +} +\details{ +This function modifies the plumber router in place and returns the updated router. +} +\examples{ +pr <- plumber::plumb("your-api-definition.R") |> + setup_audit_trail() +} diff --git a/man/setup_sentry.Rd b/man/setup_sentry.Rd new file mode 100644 index 0000000..911f563 --- /dev/null +++ b/man/setup_sentry.Rd @@ -0,0 +1,42 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/error-handling.R +\name{setup_sentry} +\alias{setup_sentry} +\title{setup_sentry function} +\usage{ +setup_sentry() +} +\arguments{ +\item{None}{} +} +\value{ +None. If the SENTRY_DSN environment variable is not set, the function will +return a message and stop execution. +} +\description{ +This function is used to configure Sentry, a service for real-time error tracking. +It uses the sentryR package to set up Sentry based on environment variables. +} +\details{ +The function first checks if the SENTRY_DSN environment variable is set. If not, it +returns a message and stops execution. +If SENTRY_DSN is set, it uses the sentryR::configure_sentry function to set up Sentry with +the following parameters: +\itemize{ +\item dsn: The Data Source Name (DSN) is retrieved from the SENTRY_DSN environment variable. +\item app_name: The application name is set to "unbiased". +\item app_version: The application version is retrieved from the GITHUB_SHA environment variable. +If not set, it defaults to "unspecified". +\item environment: The environment is retrieved from the SENTRY_ENVIRONMENT environment variable. +If not set, it defaults to "development". +\item release: The release is retrieved from the SENTRY_RELEASE environment variable. +If not set, it defaults to "unspecified". 
+} +} +\examples{ +setup_sentry() + +} +\seealso{ +\url{https://docs.sentry.io/} +} diff --git a/man/unbiased-package.Rd b/man/unbiased-package.Rd new file mode 100644 index 0000000..3b046fa --- /dev/null +++ b/man/unbiased-package.Rd @@ -0,0 +1,36 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/unbiased-package.R +\docType{package} +\name{unbiased-package} +\alias{unbiased} +\alias{unbiased-package} +\title{unbiased: Diverse Randomization Algorithms for Clinical Trials} +\description{ +The Unbiased package offers a comprehensive suite of randomization algorithms for clinical trials, encompassing dynamic strategies like the minimization method, simple randomization approaches, and block randomization techniques. Its primary purpose is to provide a harmonized set of functions that will seamlessly integrate with a production-ready plumber API, also contained within the package. This integration is designed to facilitate a smooth and efficient interface with electronic Case Report Form (eCRF) systems, enhancing the capability of clinical trials to manage patient allocation. +} +\seealso{ +Useful links: +\itemize{ + \item \url{https://ttscience.github.io/unbiased/} +} + +} +\author{ +\strong{Maintainer}: Kamil Sijko \email{kamil.sijko@ttsi.com.pl} (\href{https://orcid.org/0000-0002-2203-1065}{ORCID}) + +Authors: +\itemize{ + \item Kinga SaƂata \email{kinga.salata@ttsi.com.pl} + \item Aleksandra Duda \email{aleksandra.duda@ttsi.com.pl} + \item Ɓukasz WaƂejko \email{lukasz.walejko@ttsi.com.pl} + \item Jagoda jagoda.glowacka-walas@ttsi.com.pl GƂowacka-Walas (\href{https://orcid.org/0000-0002-7628-8691}{ORCID}) +} + +Other contributors: +\itemize{ + \item MichaƂ Seweryn \email{michal.seweryn@biol.uni.lodz.pl} (\href{https://orcid.org/0000-0002-9090-3435}{ORCID}) [contractor] + \item Transition Technologies Science Sp. z o.o. 
[funder, copyright holder] +} + +} +\keyword{internal} diff --git a/migrate_db.sh b/migrate_db.sh new file mode 100755 index 0000000..e4d342a --- /dev/null +++ b/migrate_db.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +set -e + +echo "Running database migrations" + +echo "Using database $POSTGRES_DB" + +DB_CONNECTION_STRING="postgres://$POSTGRES_USER:$POSTGRES_PASSWORD@$POSTGRES_HOST:$POSTGRES_PORT/$POSTGRES_DB?sslmode=disable" + +# Run the migrations, pass command line arguments to the migration tool +migrate -database "$DB_CONNECTION_STRING" -path ./inst/db/migrations "$@" \ No newline at end of file diff --git a/renv.lock b/renv.lock index d27d571..d2a95c6 100644 --- a/renv.lock +++ b/renv.lock @@ -1,6 +1,6 @@ { "R": { - "Version": "4.2.1", + "Version": "4.2.3", "Repositories": [ { "Name": "CRAN", @@ -9,530 +9,2300 @@ ] }, "Packages": { - "R6": { - "Package": "R6", - "Version": "2.5.1", - "Source": "Repository", - "Repository": "CRAN", - "Requirements": [ - "R" - ], - "Hash": "470851b6d5d0ac559e9d01bb352b4021" - }, - "Rcpp": { - "Package": "Rcpp", - "Version": "1.0.11", - "Source": "Repository", - "Repository": "CRAN", - "Requirements": [ - "methods", - "utils" - ], - "Hash": "ae6cbbe1492f4de79c45fce06f967ce8" - }, - "askpass": { - "Package": "askpass", - "Version": "1.2.0", + "BH": { + "Package": "BH", + "Version": "1.84.0-0", "Source": "Repository", "Repository": "RSPM", - "Requirements": [ - "sys" - ], - "Hash": "cad6cf7f1d5f6e906700b9d3e718c796" + "Hash": "a8235afbcd6316e6e91433ea47661013" }, - "backports": { - "Package": "backports", - "Version": "1.4.1", + "DBI": { + "Package": "DBI", + "Version": "1.2.2", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ - "R" + "R", + "methods" ], - "Hash": "c39fbec8a30d23e721980b8afb31984c" - }, - "brio": { - "Package": "brio", - "Version": "1.1.3", - "Source": "Repository", - "Repository": "RSPM", - "Hash": "976cf154dfb043c012d87cddd8bca363" + "Hash": "164809cd72e1d5160b4cb3aa57f510fe" }, - "callr": { - "Package": "callr", - "Version": "3.7.3", + "MASS": { + "Package": "MASS", + "Version": "7.3-60.0.1", "Source": "Repository", "Repository": "RSPM", "Requirements": [ "R", - "R6", - "processx", + "grDevices", + "graphics", + "methods", + "stats", "utils" ], - "Hash": "9b2191ede20fa29828139b9900922e51" + "Hash": "b765b28387acc8ec9e9c1530713cb19c" }, - "checkmate": { - "Package": "checkmate", - "Version": "2.2.0", + "Matrix": { + "Package": "Matrix", + "Version": "1.6-5", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "R", - "backports", + "grDevices", + "graphics", + "grid", + "lattice", + "methods", + "stats", "utils" ], - "Hash": "ca9c113196136f4a9ca9ce6079c2c99e" + "Hash": "8c7115cd3a0e048bda2a7cd110549f7a" }, - "cli": { - "Package": "cli", - "Version": "3.6.1", + "PwrGSD": { + "Package": "PwrGSD", + "Version": "2.3.6", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ - "R", - "utils" + "survival" ], - "Hash": "89e6d8219950eac806ae0c489052048a" + "Hash": "c26126e59b9b078953521379ee219a05" }, - "crayon": { - "Package": "crayon", - "Version": "1.5.2", + "R6": { + "Package": "R6", + "Version": "2.5.1", "Source": "Repository", "Repository": "CRAN", "Requirements": [ - "grDevices", - "methods", - "utils" + "R" ], - "Hash": "e8a1e41acf02548751f45c718d55aa6a" + "Hash": "470851b6d5d0ac559e9d01bb352b4021" }, - "curl": { - "Package": "curl", - "Version": "5.1.0", + "RColorBrewer": { + "Package": "RColorBrewer", + "Version": "1.1-3", 
"Source": "Repository", "Repository": "RSPM", "Requirements": [ "R" ], - "Hash": "9123f3ef96a2c1a93927d828b2fe7d4c" + "Hash": "45f0398006e83a5b10b72a90663d8d8c" }, - "desc": { - "Package": "desc", - "Version": "1.4.2", + "RPostgres": { + "Package": "RPostgres", + "Version": "1.4.6", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ + "DBI", "R", - "R6", - "cli", - "rprojroot", - "utils" + "bit64", + "blob", + "cpp11", + "hms", + "lubridate", + "methods", + "plogr", + "withr" ], - "Hash": "6b9602c7ebbe87101a9c8edb6e8b6d21" + "Hash": "a3ccabc3de4657c14185c91f3e6d4b60" }, - "diffobj": { - "Package": "diffobj", - "Version": "0.3.5", + "Rcpp": { + "Package": "Rcpp", + "Version": "1.0.12", "Source": "Repository", "Repository": "RSPM", "Requirements": [ - "R", - "crayon", "methods", - "stats", - "tools", "utils" ], - "Hash": "bcaa8b95f8d7d01a5dedfd959ce88ab8" + "Hash": "5ea2700d21e038ace58269ecdbeb9ec0" }, - "digest": { - "Package": "digest", - "Version": "0.6.33", + "RcppArmadillo": { + "Package": "RcppArmadillo", + "Version": "0.12.8.1.0", "Source": "Repository", "Repository": "RSPM", "Requirements": [ "R", + "Rcpp", + "methods", + "stats", "utils" ], - "Hash": "b18a9cf3c003977b0cc49d5e76ebe48d" + "Hash": "e78bbbb81a5dcd71a4bd3268d6ede0b1" }, - "ellipsis": { - "Package": "ellipsis", - "Version": "0.3.2", + "RcppEigen": { + "Package": "RcppEigen", + "Version": "0.3.4.0.0", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "R", - "rlang" + "Rcpp", + "stats", + "utils" ], - "Hash": "bb0eec2fe32e88d9e2836c2f73ea2077" + "Hash": "df49e3306f232ec28f1604e36a202847" }, - "evaluate": { - "Package": "evaluate", - "Version": "0.22", + "TH.data": { + "Package": "TH.data", + "Version": "1.1-2", "Source": "Repository", "Repository": "RSPM", "Requirements": [ + "MASS", "R", - "methods" + "survival" ], - "Hash": "66f39c7a21e03c4dcb2c2d21d738d603" + "Hash": "5b250ad4c5863ee4a68e280fcb0a3600" }, - "fansi": { - "Package": "fansi", - "Version": "1.0.5", + "V8": { + "Package": "V8", + "Version": "4.4.2", "Source": "Repository", "Repository": "RSPM", "Requirements": [ - "R", - "grDevices", + "Rcpp", + "curl", + "jsonlite", "utils" ], - "Hash": "3e8583a60163b4bc1a80016e63b9959e" - }, - "fastmap": { - "Package": "fastmap", - "Version": "1.1.1", - "Source": "Repository", - "Repository": "CRAN", - "Hash": "f7736a18de97dea803bde0a2daaafb27" + "Hash": "ca98390ad1cef2a5a609597b49d3d042" }, - "fs": { - "Package": "fs", - "Version": "1.6.3", + "askpass": { + "Package": "askpass", + "Version": "1.2.0", "Source": "Repository", "Repository": "RSPM", "Requirements": [ - "R", - "methods" + "sys" ], - "Hash": "47b5f30c720c23999b913a1a635cf0bb" + "Hash": "cad6cf7f1d5f6e906700b9d3e718c796" }, - "glue": { - "Package": "glue", - "Version": "1.6.2", + "backports": { + "Package": "backports", + "Version": "1.4.1", "Source": "Repository", "Repository": "CRAN", "Requirements": [ - "R", - "methods" + "R" ], - "Hash": "4f2596dfb05dac67b9dc558e5c6fba2e" + "Hash": "c39fbec8a30d23e721980b8afb31984c" }, - "httpuv": { - "Package": "httpuv", - "Version": "1.6.11", + "base64enc": { + "Package": "base64enc", + "Version": "0.1-3", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ - "R", - "R6", - "Rcpp", - "later", - "promises", - "utils" + "R" ], - "Hash": "838602f54e32c1a0f8cc80708cefcefa" + "Hash": "543776ae6848fde2f48ff3816d0628bc" }, - "httr2": { - "Package": "httr2", - "Version": "0.2.3", + "bigD": { + "Package": "bigD", + 
"Version": "0.2.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R" + ], + "Hash": "93637e906f3fe962413912c956eb44db" + }, + "bigmemory": { + "Package": "bigmemory", + "Version": "4.6.4", "Source": "Repository", "Repository": "RSPM", "Requirements": [ + "BH", "R", - "R6", - "cli", - "curl", - "glue", - "magrittr", - "openssl", - "rappdirs", - "rlang", - "withr" + "Rcpp", + "bigmemory.sri", + "methods", + "utils", + "uuid" ], - "Hash": "193bb297368afbbb42dc85784a46b36e" + "Hash": "96b3f1272c36f003f6c6c34171a57e05" }, - "jsonlite": { - "Package": "jsonlite", - "Version": "1.8.7", + "bigmemory.sri": { + "Package": "bigmemory.sri", + "Version": "0.1.8", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "methods" ], - "Hash": "266a20443ca13c65688b2116d5220f76" + "Hash": "ca3079c10ffaf7c18b783f13d3a0bc2f" }, - "later": { - "Package": "later", - "Version": "1.3.1", + "bit": { + "Package": "bit", + "Version": "4.0.5", "Source": "Repository", "Repository": "CRAN", "Requirements": [ - "Rcpp", - "rlang" + "R" ], - "Hash": "40401c9cf2bc2259dfe83311c9384710" + "Hash": "d242abec29412ce988848d0294b208fd" }, - "lifecycle": { - "Package": "lifecycle", - "Version": "1.0.3", + "bit64": { + "Package": "bit64", + "Version": "4.0.5", "Source": "Repository", "Repository": "CRAN", "Requirements": [ "R", - "cli", - "glue", - "rlang" + "bit", + "methods", + "stats", + "utils" ], - "Hash": "001cecbeac1cff9301bdc3775ee46a86" + "Hash": "9fe98599ca456d6552421db0d6772d8f" }, - "magrittr": { - "Package": "magrittr", - "Version": "2.0.3", + "bitops": { + "Package": "bitops", + "Version": "1.0-7", "Source": "Repository", - "Repository": "CRAN", - "Requirements": [ - "R" - ], - "Hash": "7ce2733a9826b3aeb1775d56fd305472" + "Repository": "RSPM", + "Hash": "b7d8d8ee39869c18d8846a184dd8a1af" }, - "mime": { - "Package": "mime", - "Version": "0.12", + "blob": { + "Package": "blob", + "Version": "1.2.4", "Source": "Repository", "Repository": "CRAN", "Requirements": [ - "tools" + "methods", + "rlang", + "vctrs" ], - "Hash": "18e9c28c1d3ca1560ce30658b22ce104" + "Hash": "40415719b5a479b87949f3aa0aee737c" }, - "openssl": { - "Package": "openssl", - "Version": "2.1.1", + "brew": { + "Package": "brew", + "Version": "1.0-10", + "Source": "Repository", + "Repository": "RSPM", + "Hash": "8f4a384e19dccd8c65356dc096847b76" + }, + "brio": { + "Package": "brio", + "Version": "1.1.4", "Source": "Repository", "Repository": "RSPM", "Requirements": [ - "askpass" + "R" ], - "Hash": "2a0dc8c6adfb6f032e4d4af82d258ab5" + "Hash": "68bd2b066e1fe780bbf62fc8bcc36de3" }, - "pillar": { - "Package": "pillar", - "Version": "1.9.0", + "broom": { + "Package": "broom", + "Version": "1.0.5", "Source": "Repository", "Repository": "RSPM", "Requirements": [ - "cli", - "fansi", + "R", + "backports", + "dplyr", + "ellipsis", + "generics", "glue", "lifecycle", + "purrr", "rlang", - "utf8", - "utils", - "vctrs" + "stringr", + "tibble", + "tidyr" ], - "Hash": "15da5a8412f317beeee6175fbc76f4bb" + "Hash": "fd25391c3c4f6ecf0fa95f1e6d15378c" }, - "pkgbuild": { - "Package": "pkgbuild", - "Version": "1.4.2", + "broom.helpers": { + "Package": "broom.helpers", + "Version": "1.14.0", "Source": "Repository", "Repository": "RSPM", "Requirements": [ "R", - "R6", - "callr", + "broom", "cli", - "crayon", - "desc", - "prettyunits", - "processx", - "rprojroot" + "dplyr", + "labelled", + "lifecycle", + "purrr", + "rlang", + "stats", + "stringr", + "tibble", + "tidyr" ], - "Hash": "beb25b32a957a22a5c301a9e441190b3" + 
"Hash": "ea30eb5d9412a4a5c2740685f680cd49" }, - "pkgconfig": { - "Package": "pkgconfig", + "bslib": { + "Package": "bslib", + "Version": "0.6.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "base64enc", + "cachem", + "grDevices", + "htmltools", + "jquerylib", + "jsonlite", + "lifecycle", + "memoise", + "mime", + "rlang", + "sass" + ], + "Hash": "c0d8599494bc7fb408cd206bbdd9cab0" + }, + "cachem": { + "Package": "cachem", + "Version": "1.0.8", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "fastmap", + "rlang" + ], + "Hash": "c35768291560ce302c0a6589f92e837d" + }, + "callr": { + "Package": "callr", + "Version": "3.7.5", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "R6", + "processx", + "utils" + ], + "Hash": "9f0e4fae4963ba775a5e5c520838c87b" + }, + "checkmate": { + "Package": "checkmate", + "Version": "2.3.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "backports", + "utils" + ], + "Hash": "c01cab1cb0f9125211a6fc99d540e315" + }, + "class": { + "Package": "class", + "Version": "7.3-22", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "MASS", + "R", + "stats", + "utils" + ], + "Hash": "f91f6b29f38b8c280f2b9477787d4bb2" + }, + "cli": { + "Package": "cli", + "Version": "3.6.2", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "utils" + ], + "Hash": "1216ac65ac55ec0058a6f75d7ca0fd52" + }, + "clipr": { + "Package": "clipr", + "Version": "0.8.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "utils" + ], + "Hash": "3f038e5ac7f41d4ac41ce658c85e3042" + }, + "codetools": { + "Package": "codetools", + "Version": "0.2-19", + "Source": "Repository", + "Repository": "CRAN", + "Requirements": [ + "R" + ], + "Hash": "c089a619a7fae175d149d89164f8c7d8" + }, + "coin": { + "Package": "coin", + "Version": "1.4-3", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "libcoin", + "matrixStats", + "methods", + "modeltools", + "multcomp", + "mvtnorm", + "parallel", + "stats", + "stats4", + "survival", + "utils" + ], + "Hash": "4084b5070a40ad99dad581ed3b67bd55" + }, + "colorspace": { + "Package": "colorspace", + "Version": "2.1-0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "grDevices", + "graphics", + "methods", + "stats" + ], + "Hash": "f20c47fd52fae58b4e377c37bb8c335b" + }, + "commonmark": { + "Package": "commonmark", + "Version": "1.9.1", + "Source": "Repository", + "Repository": "RSPM", + "Hash": "5d8225445acb167abf7797de48b2ee3c" + }, + "cpp11": { + "Package": "cpp11", + "Version": "0.4.7", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R" + ], + "Hash": "5a295d7d963cc5035284dcdbaf334f4e" + }, + "crayon": { + "Package": "crayon", + "Version": "1.5.2", + "Source": "Repository", + "Repository": "CRAN", + "Requirements": [ + "grDevices", + "methods", + "utils" + ], + "Hash": "e8a1e41acf02548751f45c718d55aa6a" + }, + "credentials": { + "Package": "credentials", + "Version": "2.0.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "askpass", + "curl", + "jsonlite", + "openssl", + "sys" + ], + "Hash": "c7844b32098dcbd1c59cbd8dddb4ecc6" + }, + "curl": { + "Package": "curl", + "Version": "5.2.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R" + ], + "Hash": "411ca2c03b1ce5f548345d2fc2685f7a" + }, + "data.table": { + "Package": "data.table", + "Version": "1.15.2", + 
"Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "methods" + ], + "Hash": "536dfe4ac4093b5d115caed7a1a7223b" + }, + "dbplyr": { + "Package": "dbplyr", + "Version": "2.4.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "DBI", + "R", + "R6", + "blob", + "cli", + "dplyr", + "glue", + "lifecycle", + "magrittr", + "methods", + "pillar", + "purrr", + "rlang", + "tibble", + "tidyr", + "tidyselect", + "utils", + "vctrs", + "withr" + ], + "Hash": "59351f28a81f0742720b85363c4fdd61" + }, + "desc": { + "Package": "desc", + "Version": "1.4.3", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "R6", + "cli", + "utils" + ], + "Hash": "99b79fcbd6c4d1ce087f5c5c758b384f" + }, + "devtools": { + "Package": "devtools", + "Version": "2.4.5", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "cli", + "desc", + "ellipsis", + "fs", + "lifecycle", + "memoise", + "miniUI", + "pkgbuild", + "pkgdown", + "pkgload", + "profvis", + "rcmdcheck", + "remotes", + "rlang", + "roxygen2", + "rversions", + "sessioninfo", + "stats", + "testthat", + "tools", + "urlchecker", + "usethis", + "utils", + "withr" + ], + "Hash": "ea5bc8b4a6a01e4f12d98b58329930bb" + }, + "diffobj": { + "Package": "diffobj", + "Version": "0.3.5", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "crayon", + "methods", + "stats", + "tools", + "utils" + ], + "Hash": "bcaa8b95f8d7d01a5dedfd959ce88ab8" + }, + "digest": { + "Package": "digest", + "Version": "0.6.34", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "utils" + ], + "Hash": "7ede2ee9ea8d3edbf1ca84c1e333ad1a" + }, + "downlit": { + "Package": "downlit", + "Version": "0.4.3", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "brio", + "desc", + "digest", + "evaluate", + "fansi", + "memoise", + "rlang", + "vctrs", + "withr", + "yaml" + ], + "Hash": "14fa1f248b60ed67e1f5418391a17b14" + }, + "dplyr": { + "Package": "dplyr", + "Version": "1.1.4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "R6", + "cli", + "generics", + "glue", + "lifecycle", + "magrittr", + "methods", + "pillar", + "rlang", + "tibble", + "tidyselect", + "utils", + "vctrs" + ], + "Hash": "fedd9d00c2944ff00a0e2696ccf048ec" + }, + "e1071": { + "Package": "e1071", + "Version": "1.7-14", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "class", + "grDevices", + "graphics", + "methods", + "proxy", + "stats", + "utils" + ], + "Hash": "4ef372b716824753719a8a38b258442d" + }, + "ellipsis": { + "Package": "ellipsis", + "Version": "0.3.2", + "Source": "Repository", + "Repository": "CRAN", + "Requirements": [ + "R", + "rlang" + ], + "Hash": "bb0eec2fe32e88d9e2836c2f73ea2077" + }, + "evaluate": { + "Package": "evaluate", + "Version": "0.23", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "methods" + ], + "Hash": "daf4a1246be12c1fa8c7705a0935c1a0" + }, + "fansi": { + "Package": "fansi", + "Version": "1.0.6", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "grDevices", + "utils" + ], + "Hash": "962174cf2aeb5b9eea581522286a911f" + }, + "farver": { + "Package": "farver", + "Version": "2.1.1", + "Source": "Repository", + "Repository": "RSPM", + "Hash": "8106d78941f34855c440ddb946b8f7a5" + }, + "fastglm": { + "Package": "fastglm", + "Version": "0.0.3", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "BH", + "Rcpp", + 
"RcppEigen", + "bigmemory", + "methods" + ], + "Hash": "e0f222ad320efdaa48ebf88eb576bb21" + }, + "fastmap": { + "Package": "fastmap", + "Version": "1.1.1", + "Source": "Repository", + "Repository": "CRAN", + "Hash": "f7736a18de97dea803bde0a2daaafb27" + }, + "fontawesome": { + "Package": "fontawesome", + "Version": "0.5.2", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "htmltools", + "rlang" + ], + "Hash": "c2efdd5f0bcd1ea861c2d4e2a883a67d" + }, + "forcats": { + "Package": "forcats", + "Version": "1.0.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "cli", + "glue", + "lifecycle", + "magrittr", + "rlang", + "tibble" + ], + "Hash": "1a0a9a3d5083d0d573c4214576f1e690" + }, + "fs": { + "Package": "fs", + "Version": "1.6.3", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "methods" + ], + "Hash": "47b5f30c720c23999b913a1a635cf0bb" + }, + "gdata": { + "Package": "gdata", + "Version": "3.0.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "gtools", + "methods", + "stats", + "utils" + ], + "Hash": "d3d6e4c174b8a5f251fd273f245f2471" + }, + "generics": { + "Package": "generics", + "Version": "0.1.3", + "Source": "Repository", + "Repository": "CRAN", + "Requirements": [ + "R", + "methods" + ], + "Hash": "15e9634c0fcd294799e9b2e929ed1b86" + }, + "gert": { + "Package": "gert", + "Version": "2.0.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "askpass", + "credentials", + "openssl", + "rstudioapi", + "sys", + "zip" + ], + "Hash": "f70d3fe2d9e7654213a946963d1591eb" + }, + "ggplot2": { + "Package": "ggplot2", + "Version": "3.5.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "MASS", + "R", + "cli", + "glue", + "grDevices", + "grid", + "gtable", + "isoband", + "lifecycle", + "mgcv", + "rlang", + "scales", + "stats", + "tibble", + "vctrs", + "withr" + ], + "Hash": "52ef83f93f74833007f193b2d4c159a2" + }, + "gh": { + "Package": "gh", + "Version": "1.4.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "cli", + "gitcreds", + "httr2", + "ini", + "jsonlite", + "rlang" + ], + "Hash": "03533b1c875028233598f848fda44c4c" + }, + "gitcreds": { + "Package": "gitcreds", + "Version": "0.1.2", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R" + ], + "Hash": "ab08ac61f3e1be454ae21911eb8bc2fe" + }, + "glue": { + "Package": "glue", + "Version": "1.7.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "methods" + ], + "Hash": "e0b3a53876554bd45879e596cdb10a52" + }, + "gmodels": { + "Package": "gmodels", + "Version": "2.18.1.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "MASS", + "R", + "gdata" + ], + "Hash": "6713a242cb6909e492d8169a35dfe0b0" + }, + "gsDesign": { + "Package": "gsDesign", + "Version": "3.6.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "dplyr", + "ggplot2", + "graphics", + "gt", + "magrittr", + "methods", + "r2rtf", + "rlang", + "stats", + "tibble", + "tidyr", + "tools", + "xtable" + ], + "Hash": "b3490a78ab0a4cd22d19e732b20225da" + }, + "gt": { + "Package": "gt", + "Version": "0.10.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "base64enc", + "bigD", + "bitops", + "cli", + "commonmark", + "dplyr", + "fs", + "glue", + "htmltools", + "htmlwidgets", + "juicyjuice", + "magrittr", + "markdown", + "reactable", + "rlang", + "sass", + "scales", + 
"tidyselect", + "vctrs", + "xml2" + ], + "Hash": "03009c105dfae79460b8eb9d8cf791e4" + }, + "gtable": { + "Package": "gtable", + "Version": "0.3.4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "cli", + "glue", + "grid", + "lifecycle", + "rlang" + ], + "Hash": "b29cf3031f49b04ab9c852c912547eef" + }, + "gtools": { + "Package": "gtools", + "Version": "3.9.5", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "methods", + "stats", + "utils" + ], + "Hash": "588d091c35389f1f4a9d533c8d709b35" + }, + "gtsummary": { + "Package": "gtsummary", + "Version": "1.7.2", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "broom", + "broom.helpers", + "cli", + "dplyr", + "forcats", + "glue", + "gt", + "knitr", + "lifecycle", + "purrr", + "rlang", + "stringr", + "tibble", + "tidyr", + "vctrs" + ], + "Hash": "08df7405a102e3f0bdf7a13a29e8c6ab" + }, + "haven": { + "Package": "haven", + "Version": "2.5.4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "cli", + "cpp11", + "forcats", + "hms", + "lifecycle", + "methods", + "readr", + "rlang", + "tibble", + "tidyselect", + "vctrs" + ], + "Hash": "9171f898db9d9c4c1b2c745adc2c1ef1" + }, + "highr": { + "Package": "highr", + "Version": "0.10", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "xfun" + ], + "Hash": "06230136b2d2b9ba5805e1963fa6e890" + }, + "hms": { + "Package": "hms", + "Version": "1.1.3", + "Source": "Repository", + "Repository": "CRAN", + "Requirements": [ + "lifecycle", + "methods", + "pkgconfig", + "rlang", + "vctrs" + ], + "Hash": "b59377caa7ed00fa41808342002138f9" + }, + "htmltools": { + "Package": "htmltools", + "Version": "0.5.7", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "base64enc", + "digest", + "ellipsis", + "fastmap", + "grDevices", + "rlang", + "utils" + ], + "Hash": "2d7b3857980e0e0d0a1fd6f11928ab0f" + }, + "htmlwidgets": { + "Package": "htmlwidgets", + "Version": "1.6.4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "grDevices", + "htmltools", + "jsonlite", + "knitr", + "rmarkdown", + "yaml" + ], + "Hash": "04291cc45198225444a397606810ac37" + }, + "httpuv": { + "Package": "httpuv", + "Version": "1.6.14", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "R6", + "Rcpp", + "later", + "promises", + "utils" + ], + "Hash": "16abeb167dbf511f8cc0552efaf05bab" + }, + "httr": { + "Package": "httr", + "Version": "1.4.7", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "R6", + "curl", + "jsonlite", + "mime", + "openssl" + ], + "Hash": "ac107251d9d9fd72f0ca8049988f1d7f" + }, + "httr2": { + "Package": "httr2", + "Version": "1.0.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "R6", + "cli", + "curl", + "glue", + "lifecycle", + "magrittr", + "openssl", + "rappdirs", + "rlang", + "vctrs", + "withr" + ], + "Hash": "e2b30f1fc039a0bab047dd52bb20ef71" + }, + "ini": { + "Package": "ini", + "Version": "0.3.1", + "Source": "Repository", + "Repository": "RSPM", + "Hash": "6154ec2223172bce8162d4153cda21f7" + }, + "insight": { + "Package": "insight", + "Version": "0.19.8", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "methods", + "stats", + "utils" + ], + "Hash": "adcc19435135a4d211e5aa2e48e4f6b7" + }, + "isoband": { + "Package": "isoband", + "Version": "0.2.7", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ 
+ "grid", + "utils" + ], + "Hash": "0080607b4a1a7b28979aecef976d8bc2" + }, + "jquerylib": { + "Package": "jquerylib", + "Version": "0.1.4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "htmltools" + ], + "Hash": "5aab57a3bd297eee1c1d862735972182" + }, + "jsonlite": { + "Package": "jsonlite", + "Version": "1.8.8", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "methods" + ], + "Hash": "e1b9c55281c5adc4dd113652d9e26768" + }, + "juicyjuice": { + "Package": "juicyjuice", + "Version": "0.1.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "V8" + ], + "Hash": "3bcd11943da509341838da9399e18bce" + }, + "knitr": { + "Package": "knitr", + "Version": "1.45", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "evaluate", + "highr", + "methods", + "tools", + "xfun", + "yaml" + ], + "Hash": "1ec462871063897135c1bcbe0fc8f07d" + }, + "labeling": { + "Package": "labeling", + "Version": "0.4.3", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "graphics", + "stats" + ], + "Hash": "b64ec208ac5bc1852b285f665d6368b3" + }, + "labelled": { + "Package": "labelled", + "Version": "2.12.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "dplyr", + "haven", + "lifecycle", + "rlang", + "stringr", + "tidyr", + "vctrs" + ], + "Hash": "1ec27c624ece6c20431e9249bd232797" + }, + "later": { + "Package": "later", + "Version": "1.3.2", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "Rcpp", + "rlang" + ], + "Hash": "a3e051d405326b8b0012377434c62b37" + }, + "lattice": { + "Package": "lattice", + "Version": "0.22-5", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "grDevices", + "graphics", + "grid", + "stats", + "utils" + ], + "Hash": "7c5e89f04e72d6611c77451f6331a091" + }, + "libcoin": { + "Package": "libcoin", + "Version": "1.0-10", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "mvtnorm", + "stats" + ], + "Hash": "3f3775a14588ff5d013e5eab4453bf28" + }, + "lifecycle": { + "Package": "lifecycle", + "Version": "1.0.4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "cli", + "glue", + "rlang" + ], + "Hash": "b8552d117e1b808b09a832f589b79035" + }, + "lubridate": { + "Package": "lubridate", + "Version": "1.9.3", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "generics", + "methods", + "timechange" + ], + "Hash": "680ad542fbcf801442c83a6ac5a2126c" + }, + "magrittr": { + "Package": "magrittr", + "Version": "2.0.3", + "Source": "Repository", + "Repository": "CRAN", + "Requirements": [ + "R" + ], + "Hash": "7ce2733a9826b3aeb1775d56fd305472" + }, + "markdown": { + "Package": "markdown", + "Version": "1.12", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "commonmark", + "utils", + "xfun" + ], + "Hash": "765cf53992401b3b6c297b69e1edb8bd" + }, + "mathjaxr": { + "Package": "mathjaxr", + "Version": "1.6-0", + "Source": "Repository", + "Repository": "RSPM", + "Hash": "87da6ccdcee6077a7d5719406bf3ae45" + }, + "matrixStats": { + "Package": "matrixStats", + "Version": "1.2.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R" + ], + "Hash": "33a3ca9e732b57244d14f5d732ffc9eb" + }, + "memoise": { + "Package": "memoise", + "Version": "2.0.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "cachem", + "rlang" + ], + "Hash": "e2817ccf4a065c5d9d7f2cfbe7c1d78c" + 
}, + "mgcv": { + "Package": "mgcv", + "Version": "1.9-1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "Matrix", + "R", + "graphics", + "methods", + "nlme", + "splines", + "stats", + "utils" + ], + "Hash": "110ee9d83b496279960e162ac97764ce" + }, + "mime": { + "Package": "mime", + "Version": "0.12", + "Source": "Repository", + "Repository": "CRAN", + "Requirements": [ + "tools" + ], + "Hash": "18e9c28c1d3ca1560ce30658b22ce104" + }, + "miniUI": { + "Package": "miniUI", + "Version": "0.1.1.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "htmltools", + "shiny", + "utils" + ], + "Hash": "fec5f52652d60615fdb3957b3d74324a" + }, + "minqa": { + "Package": "minqa", + "Version": "1.2.6", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "Rcpp" + ], + "Hash": "f48238f8d4740426ca12f53f27d004dd" + }, + "mitools": { + "Package": "mitools", + "Version": "2.4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "DBI", + "methods", + "stats" + ], + "Hash": "a4b659bd0528226724d55034f11ed7cb" + }, + "modeltools": { + "Package": "modeltools", + "Version": "0.2-23", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "methods", + "stats", + "stats4" + ], + "Hash": "f5a957c02222589bdf625a67be68b2a9" + }, + "mstate": { + "Package": "mstate", + "Version": "0.3.2", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "RColorBrewer", + "data.table", + "lattice", + "rlang", + "survival", + "viridisLite" + ], + "Hash": "53ca2f4a1ab4ac93fec33c92dc22c886" + }, + "multcomp": { + "Package": "multcomp", + "Version": "1.4-25", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "TH.data", + "codetools", + "graphics", + "mvtnorm", + "sandwich", + "stats", + "survival" + ], + "Hash": "2688bf2f8d54c19534ee7d8a876d9fc7" + }, + "munsell": { + "Package": "munsell", + "Version": "0.5.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "colorspace", + "methods" + ], + "Hash": "6dfe8bf774944bd5595785e3229d8771" + }, + "mvnfast": { + "Package": "mvnfast", + "Version": "0.2.8", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "BH", + "Rcpp", + "RcppArmadillo" + ], + "Hash": "e65cac8e8501bdfbdca0412c37bb18c9" + }, + "mvtnorm": { + "Package": "mvtnorm", + "Version": "1.2-4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "stats" + ], + "Hash": "17e96668f44a28aef0981d9e17c49b59" + }, + "nlme": { + "Package": "nlme", + "Version": "3.1-164", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "graphics", + "lattice", + "stats", + "utils" + ], + "Hash": "a623a2239e642806158bc4dc3f51565d" + }, + "numDeriv": { + "Package": "numDeriv", + "Version": "2016.8-1.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R" + ], + "Hash": "df58958f293b166e4ab885ebcad90e02" + }, + "openssl": { + "Package": "openssl", + "Version": "2.1.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "askpass" + ], + "Hash": "2a0dc8c6adfb6f032e4d4af82d258ab5" + }, + "pbv": { + "Package": "pbv", + "Version": "0.5-47", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "Rcpp", + "RcppArmadillo" + ], + "Hash": "b0fa64575651e261cfa1fdb46025cb44" + }, + "pillar": { + "Package": "pillar", + "Version": "1.9.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "cli", + "fansi", + "glue", + "lifecycle", + "rlang", + "utf8", 
+ "utils", + "vctrs" + ], + "Hash": "15da5a8412f317beeee6175fbc76f4bb" + }, + "pkgbuild": { + "Package": "pkgbuild", + "Version": "1.4.3", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "R6", + "callr", + "cli", + "desc", + "processx" + ], + "Hash": "c0143443203205e6a2760ce553dafc24" + }, + "pkgconfig": { + "Package": "pkgconfig", "Version": "2.0.3", "Source": "Repository", "Repository": "RSPM", "Requirements": [ "utils" ], - "Hash": "01f28d4278f15c76cddbea05899c5d6f" + "Hash": "01f28d4278f15c76cddbea05899c5d6f" + }, + "pkgdown": { + "Package": "pkgdown", + "Version": "2.0.7", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "bslib", + "callr", + "cli", + "desc", + "digest", + "downlit", + "fs", + "httr", + "jsonlite", + "magrittr", + "memoise", + "purrr", + "ragg", + "rlang", + "rmarkdown", + "tibble", + "whisker", + "withr", + "xml2", + "yaml" + ], + "Hash": "16fa15449c930bf3a7761d3c68f8abf9" + }, + "pkgload": { + "Package": "pkgload", + "Version": "1.3.4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "cli", + "crayon", + "desc", + "fs", + "glue", + "methods", + "pkgbuild", + "rlang", + "rprojroot", + "utils", + "withr" + ], + "Hash": "876c618df5ae610be84356d5d7a5d124" + }, + "plogr": { + "Package": "plogr", + "Version": "0.2.0", + "Source": "Repository", + "Repository": "RSPM", + "Hash": "09eb987710984fc2905c7129c7d85e65" + }, + "plotrix": { + "Package": "plotrix", + "Version": "3.8-4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "grDevices", + "graphics", + "stats", + "utils" + ], + "Hash": "d47fdfc45aeba360ce9db50643de3fbd" + }, + "plumber": { + "Package": "plumber", + "Version": "1.2.1", + "Source": "Repository", + "Repository": "CRAN", + "Requirements": [ + "R", + "R6", + "crayon", + "ellipsis", + "httpuv", + "jsonlite", + "lifecycle", + "magrittr", + "mime", + "promises", + "rlang", + "sodium", + "stringi", + "swagger", + "webutils" + ], + "Hash": "8b65a7a00ef8edc5ddc6fabf0aff1194" + }, + "plyr": { + "Package": "plyr", + "Version": "1.8.9", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "Rcpp" + ], + "Hash": "6b8177fd19982f0020743fadbfdbd933" + }, + "pool": { + "Package": "pool", + "Version": "1.0.3", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "DBI", + "R", + "R6", + "later", + "methods", + "rlang" + ], + "Hash": "b336b9f1b3cc72033258c70dc17edbf1" + }, + "praise": { + "Package": "praise", + "Version": "1.0.0", + "Source": "Repository", + "Repository": "RSPM", + "Hash": "a555924add98c99d2f411e37e7d25e9f" + }, + "prettyunits": { + "Package": "prettyunits", + "Version": "1.2.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R" + ], + "Hash": "6b01fc98b1e86c4f705ce9dcfd2f57c7" + }, + "processx": { + "Package": "processx", + "Version": "3.8.3", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "R6", + "ps", + "utils" + ], + "Hash": "82d48b1aec56084d9438dbf98087a7e9" + }, + "profvis": { + "Package": "profvis", + "Version": "0.3.8", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "htmlwidgets", + "purrr", + "rlang", + "stringr", + "vctrs" + ], + "Hash": "aa5a3864397ce6ae03458f98618395a1" + }, + "progress": { + "Package": "progress", + "Version": "1.2.3", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "R6", + "crayon", + "hms", + "prettyunits" + ], + "Hash": 
"f4625e061cb2865f111b47ff163a5ca6" + }, + "promises": { + "Package": "promises", + "Version": "1.2.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R6", + "Rcpp", + "fastmap", + "later", + "magrittr", + "rlang", + "stats" + ], + "Hash": "0d8a15c9d000970ada1ab21405387dee" + }, + "proxy": { + "Package": "proxy", + "Version": "0.4-27", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "stats", + "utils" + ], + "Hash": "e0ef355c12942cf7a6b91a6cfaea8b3e" + }, + "ps": { + "Package": "ps", + "Version": "1.7.6", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "utils" + ], + "Hash": "dd2b9319ee0656c8acf45c7f40c59de7" }, - "pkgload": { - "Package": "pkgload", - "Version": "1.3.3", + "purrr": { + "Package": "purrr", + "Version": "1.0.2", "Source": "Repository", "Repository": "RSPM", "Requirements": [ "R", "cli", - "crayon", - "desc", - "fs", - "glue", + "lifecycle", + "magrittr", + "rlang", + "vctrs" + ], + "Hash": "1cba04a4e9414bdefc9dcaa99649a8dc" + }, + "r2rtf": { + "Package": "r2rtf", + "Version": "1.1.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "grDevices", + "tools" + ], + "Hash": "807989b4dccfab6440841a5e8aaa95f1" + }, + "ragg": { + "Package": "ragg", + "Version": "1.2.7", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "systemfonts", + "textshaping" + ], + "Hash": "90a1b8b7e518d7f90480d56453b4d062" + }, + "randomizeR": { + "Package": "randomizeR", + "Version": "3.0.2", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "PwrGSD", + "R", + "coin", + "dplyr", + "ggplot2", + "gsDesign", + "insight", + "magrittr", "methods", - "pkgbuild", + "mstate", + "mvtnorm", + "plotrix", + "purrr", + "reshape2", "rlang", + "survival" + ], + "Hash": "d22309ab2b609eb233d4b2e931dad265" + }, + "rappdirs": { + "Package": "rappdirs", + "Version": "0.3.3", + "Source": "Repository", + "Repository": "CRAN", + "Requirements": [ + "R" + ], + "Hash": "5e3c5dc0b071b21fa128676560dbe94d" + }, + "rcmdcheck": { + "Package": "rcmdcheck", + "Version": "1.4.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R6", + "callr", + "cli", + "curl", + "desc", + "digest", + "pkgbuild", + "prettyunits", "rprojroot", + "sessioninfo", "utils", - "withr" + "withr", + "xopen" ], - "Hash": "903d68319ae9923fb2e2ee7fa8230b91" + "Hash": "8f25ebe2ec38b1f2aef3b0d2ef76f6c4" }, - "plumber": { - "Package": "plumber", - "Version": "1.2.1", + "reactR": { + "Package": "reactR", + "Version": "0.5.0", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", + "Requirements": [ + "htmltools" + ], + "Hash": "c9014fd1a435b2d790dd506589cb24e5" + }, + "reactable": { + "Package": "reactable", + "Version": "0.4.4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "digest", + "htmltools", + "htmlwidgets", + "jsonlite", + "reactR" + ], + "Hash": "6069eb2a6597963eae0605c1875ff14c" + }, + "readr": { + "Package": "readr", + "Version": "2.1.5", + "Source": "Repository", + "Repository": "RSPM", "Requirements": [ "R", "R6", + "cli", + "clipr", + "cpp11", "crayon", - "ellipsis", - "httpuv", - "jsonlite", + "hms", "lifecycle", - "magrittr", - "mime", - "promises", + "methods", "rlang", - "sodium", - "stringi", - "swagger", - "webutils" + "tibble", + "tzdb", + "utils", + "vroom" ], - "Hash": "8b65a7a00ef8edc5ddc6fabf0aff1194" + "Hash": "9de96463d2117f6ac49980577939dfb3" }, - "praise": { - "Package": "praise", - "Version": "1.0.0", + 
"rematch2": { + "Package": "rematch2", + "Version": "2.1.2", "Source": "Repository", "Repository": "RSPM", - "Hash": "a555924add98c99d2f411e37e7d25e9f" + "Requirements": [ + "tibble" + ], + "Hash": "76c9e04c712a05848ae7a23d2f170a40" }, - "prettyunits": { - "Package": "prettyunits", - "Version": "1.2.0", + "remotes": { + "Package": "remotes", + "Version": "2.4.2.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "methods", + "stats", + "tools", + "utils" + ], + "Hash": "63d15047eb239f95160112bcadc4fcb9" + }, + "renv": { + "Package": "renv", + "Version": "1.0.5", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "utils" + ], + "Hash": "32c3f93e8360f667ca5863272ec8ba6a" + }, + "reshape2": { + "Package": "reshape2", + "Version": "1.4.4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "Rcpp", + "plyr", + "stringr" + ], + "Hash": "bb5996d0bd962d214a11140d77589917" + }, + "rlang": { + "Package": "rlang", + "Version": "1.1.3", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "utils" + ], + "Hash": "42548638fae05fd9a9b5f3f437fbbbe2" + }, + "rmarkdown": { + "Package": "rmarkdown", + "Version": "2.25", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "bslib", + "evaluate", + "fontawesome", + "htmltools", + "jquerylib", + "jsonlite", + "knitr", + "methods", + "stringr", + "tinytex", + "tools", + "utils", + "xfun", + "yaml" + ], + "Hash": "d65e35823c817f09f4de424fcdfa812a" + }, + "roxygen2": { + "Package": "roxygen2", + "Version": "7.3.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "R6", + "brew", + "cli", + "commonmark", + "cpp11", + "desc", + "knitr", + "methods", + "pkgload", + "purrr", + "rlang", + "stringi", + "stringr", + "utils", + "withr", + "xml2" + ], + "Hash": "c25fe7b2d8cba73d1b63c947bf7afdb9" + }, + "rprojroot": { + "Package": "rprojroot", + "Version": "2.0.4", "Source": "Repository", "Repository": "RSPM", "Requirements": [ "R" ], - "Hash": "6b01fc98b1e86c4f705ce9dcfd2f57c7" + "Hash": "4c8415e0ec1e29f3f4f6fc108bef0144" }, - "processx": { - "Package": "processx", - "Version": "3.8.2", + "rstudioapi": { + "Package": "rstudioapi", + "Version": "0.15.0", + "Source": "Repository", + "Repository": "RSPM", + "Hash": "5564500e25cffad9e22244ced1379887" + }, + "rversions": { + "Package": "rversions", + "Version": "2.1.2", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "curl", + "utils", + "xml2" + ], + "Hash": "a9881dfed103e83f9de151dc17002cd1" + }, + "sandwich": { + "Package": "sandwich", + "Version": "3.1-0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "stats", + "utils", + "zoo" + ], + "Hash": "1cf6ae532f0179350862fefeb0987c9b" + }, + "sass": { + "Package": "sass", + "Version": "0.4.8", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R6", + "fs", + "htmltools", + "rappdirs", + "rlang" + ], + "Hash": "168f9353c76d4c4b0a0bbf72e2c2d035" + }, + "scales": { + "Package": "scales", + "Version": "1.3.0", "Source": "Repository", "Repository": "RSPM", "Requirements": [ "R", "R6", - "ps", + "RColorBrewer", + "cli", + "farver", + "glue", + "labeling", + "lifecycle", + "munsell", + "rlang", + "viridisLite" + ], + "Hash": "c19df082ba346b0ffa6f833e92de34d1" + }, + "sentryR": { + "Package": "sentryR", + "Version": "1.1.2", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "httr", + "jsonlite", + "stats", + "stringr", + "tibble", 
+ "uuid" + ], + "Hash": "f37e91d605fbf665d7b5467ded4e539e" + }, + "sessioninfo": { + "Package": "sessioninfo", + "Version": "1.2.2", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "cli", + "tools", "utils" ], - "Hash": "3efbd8ac1be0296a46c55387aeace0f3" + "Hash": "3f9796a8d0a0e8c6eb49a4b029359d1f" }, - "promises": { - "Package": "promises", - "Version": "1.2.1", + "shiny": { + "Package": "shiny", + "Version": "1.8.0", "Source": "Repository", "Repository": "RSPM", "Requirements": [ + "R", "R6", - "Rcpp", + "bslib", + "cachem", + "commonmark", + "crayon", + "ellipsis", "fastmap", + "fontawesome", + "glue", + "grDevices", + "htmltools", + "httpuv", + "jsonlite", "later", - "magrittr", + "lifecycle", + "methods", + "mime", + "promises", "rlang", - "stats" + "sourcetools", + "tools", + "utils", + "withr", + "xtable" ], - "Hash": "0d8a15c9d000970ada1ab21405387dee" + "Hash": "3a1f41807d648a908e3c7f0334bf85e6" }, - "ps": { - "Package": "ps", - "Version": "1.7.5", + "simstudy": { + "Package": "simstudy", + "Version": "0.7.1", "Source": "Repository", "Repository": "RSPM", "Requirements": [ "R", - "utils" + "Rcpp", + "backports", + "data.table", + "fastglm", + "glue", + "methods", + "mvnfast", + "pbv" ], - "Hash": "709d852d33178db54b17c722e5b1e594" + "Hash": "deb66424ac81e3aa78066791e0e6b97f" }, - "rappdirs": { - "Package": "rappdirs", - "Version": "0.3.3", + "sodium": { + "Package": "sodium", + "Version": "1.3.1", "Source": "Repository", - "Repository": "CRAN", - "Requirements": [ - "R" - ], - "Hash": "5e3c5dc0b071b21fa128676560dbe94d" + "Repository": "RSPM", + "Hash": "dd86d6fd2a01d4eb3777dfdee7076d56" }, - "rematch2": { - "Package": "rematch2", - "Version": "2.1.2", + "sourcetools": { + "Package": "sourcetools", + "Version": "0.1.7-1", "Source": "Repository", "Repository": "RSPM", "Requirements": [ - "tibble" + "R" ], - "Hash": "76c9e04c712a05848ae7a23d2f170a40" + "Hash": "5f5a7629f956619d519205ec475fe647" }, - "renv": { - "Package": "renv", - "Version": "1.0.0", + "stringi": { + "Package": "stringi", + "Version": "1.8.3", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ + "R", + "stats", + "tools", "utils" ], - "Hash": "c321cd99d56443dbffd1c9e673c0c1a2" + "Hash": "058aebddea264f4c99401515182e656a" }, - "rlang": { - "Package": "rlang", - "Version": "1.1.1", + "stringr": { + "Package": "stringr", + "Version": "1.5.1", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "R", - "utils" + "cli", + "glue", + "lifecycle", + "magrittr", + "rlang", + "stringi", + "vctrs" ], - "Hash": "a85c767b55f0bf9b7ad16c6d7baee5bb" + "Hash": "960e2ae9e09656611e0b8214ad543207" }, - "rprojroot": { - "Package": "rprojroot", - "Version": "2.0.3", + "survey": { + "Package": "survey", + "Version": "4.2-1", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ - "R" + "Matrix", + "R", + "graphics", + "grid", + "lattice", + "methods", + "minqa", + "mitools", + "numDeriv", + "splines", + "stats", + "survival" ], - "Hash": "1de7ab598047a87bba48434ba35d497d" + "Hash": "03195177db81a992f22361f8f54852f4" }, - "sodium": { - "Package": "sodium", - "Version": "1.3.0", + "survival": { + "Package": "survival", + "Version": "3.5-8", "Source": "Repository", "Repository": "RSPM", - "Hash": "bd436c1e48dc1982125e4d955017724e" - }, - "stringi": { - "Package": "stringi", - "Version": "1.7.12", - "Source": "Repository", - "Repository": "CRAN", "Requirements": [ + "Matrix", "R", + "graphics", + 
"methods", + "splines", "stats", - "tools", "utils" ], - "Hash": "ca8bd84263c77310739d2cf64d84d7c9" + "Hash": "184d7799bca4ba8c3be72ea396f4b9a3" }, "swagger": { "Package": "swagger", @@ -548,9 +2318,36 @@ "Repository": "RSPM", "Hash": "3a1be13d68d47a8cd0bfd74739ca1555" }, + "systemfonts": { + "Package": "systemfonts", + "Version": "1.0.5", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "cpp11" + ], + "Hash": "15b594369e70b975ba9f064295983499" + }, + "tableone": { + "Package": "tableone", + "Version": "0.13.2", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "MASS", + "e1071", + "gmodels", + "labelled", + "nlme", + "survey", + "zoo" + ], + "Hash": "b1a77da61a4c3585987241b8a1cc6b95" + }, "testthat": { "Package": "testthat", - "Version": "3.2.0", + "Version": "3.2.1", "Source": "Repository", "Repository": "RSPM", "Requirements": [ @@ -561,7 +2358,6 @@ "cli", "desc", "digest", - "ellipsis", "evaluate", "jsonlite", "lifecycle", @@ -576,7 +2372,19 @@ "waldo", "withr" ], - "Hash": "877508719fcb8c9525eccdadf07a5102" + "Hash": "4767a686ebe986e6cb01d075b3f09729" + }, + "textshaping": { + "Package": "textshaping", + "Version": "0.3.7", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "cpp11", + "systemfonts" + ], + "Hash": "997aac9ad649e0ef3b97f96cddd5622b" }, "tibble": { "Package": "tibble", @@ -597,19 +2405,155 @@ ], "Hash": "a84e2cc86d07289b3b6f5069df7a004c" }, + "tidyr": { + "Package": "tidyr", + "Version": "1.3.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "cli", + "cpp11", + "dplyr", + "glue", + "lifecycle", + "magrittr", + "purrr", + "rlang", + "stringr", + "tibble", + "tidyselect", + "utils", + "vctrs" + ], + "Hash": "915fb7ce036c22a6a33b5a8adb712eb1" + }, + "tidyselect": { + "Package": "tidyselect", + "Version": "1.2.0", + "Source": "Repository", + "Repository": "CRAN", + "Requirements": [ + "R", + "cli", + "glue", + "lifecycle", + "rlang", + "vctrs", + "withr" + ], + "Hash": "79540e5fcd9e0435af547d885f184fd5" + }, + "timechange": { + "Package": "timechange", + "Version": "0.3.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "cpp11" + ], + "Hash": "c5f3c201b931cd6474d17d8700ccb1c8" + }, + "tinytex": { + "Package": "tinytex", + "Version": "0.49", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "xfun" + ], + "Hash": "5ac22900ae0f386e54f1c307eca7d843" + }, + "truncnorm": { + "Package": "truncnorm", + "Version": "1.0-9", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R" + ], + "Hash": "ef5b32c5194351ff409dfb37ca9468f1" + }, + "tzdb": { + "Package": "tzdb", + "Version": "0.4.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "cpp11" + ], + "Hash": "f561504ec2897f4d46f0c7657e488ae1" + }, + "urlchecker": { + "Package": "urlchecker", + "Version": "1.0.1", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "cli", + "curl", + "tools", + "xml2" + ], + "Hash": "409328b8e1253c8d729a7836fe7f7a16" + }, + "usethis": { + "Package": "usethis", + "Version": "2.2.3", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "cli", + "clipr", + "crayon", + "curl", + "desc", + "fs", + "gert", + "gh", + "glue", + "jsonlite", + "lifecycle", + "purrr", + "rappdirs", + "rlang", + "rprojroot", + "rstudioapi", + "stats", + "utils", + "whisker", + "withr", + "yaml" + ], + "Hash": "d524fd42c517035027f866064417d7e6" + }, "utf8": { 
"Package": "utf8", - "Version": "1.2.3", + "Version": "1.2.4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R" + ], + "Hash": "62b65c52671e6665f803ff02954446e9" + }, + "uuid": { + "Package": "uuid", + "Version": "1.2-0", "Source": "Repository", "Repository": "RSPM", "Requirements": [ "R" ], - "Hash": "1fe17157424bb09c48a8b3b550c753bc" + "Hash": "303c19bfd970bece872f93a824e323d9" }, "vctrs": { "Package": "vctrs", - "Version": "0.6.4", + "Version": "0.6.5", "Source": "Repository", "Repository": "RSPM", "Requirements": [ @@ -619,14 +2563,51 @@ "lifecycle", "rlang" ], - "Hash": "266c1ca411266ba8f365fcc726444b87" + "Hash": "c03fa420630029418f7e6da3667aac4a" + }, + "viridisLite": { + "Package": "viridisLite", + "Version": "0.4.2", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R" + ], + "Hash": "c826c7c4241b6fc89ff55aaea3fa7491" + }, + "vroom": { + "Package": "vroom", + "Version": "1.6.5", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "bit64", + "cli", + "cpp11", + "crayon", + "glue", + "hms", + "lifecycle", + "methods", + "progress", + "rlang", + "stats", + "tibble", + "tidyselect", + "tzdb", + "vctrs", + "withr" + ], + "Hash": "390f9315bc0025be03012054103d227c" }, "waldo": { "Package": "waldo", - "Version": "0.5.1", + "Version": "0.5.2", "Source": "Repository", "Repository": "RSPM", "Requirements": [ + "R", "cli", "diffobj", "fansi", @@ -636,31 +2617,114 @@ "rlang", "tibble" ], - "Hash": "2c993415154cdb94649d99ae138ff5e5" + "Hash": "c7d3fd6d29ab077cbac8f0e2751449e6" }, "webutils": { "Package": "webutils", - "Version": "1.1", + "Version": "1.2.0", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "curl", "jsonlite" ], - "Hash": "75d8b5b05fe22659b54076563f83f26a" + "Hash": "6a7f2a3084c7249d2f1466d6e4cc2e84" + }, + "whisker": { + "Package": "whisker", + "Version": "0.4.1", + "Source": "Repository", + "Repository": "RSPM", + "Hash": "c6abfa47a46d281a7d5159d0a8891e88" }, "withr": { "Package": "withr", - "Version": "2.5.1", + "Version": "3.0.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "grDevices", + "graphics" + ], + "Hash": "d31b6c62c10dcf11ec530ca6b0dd5d35" + }, + "xfun": { + "Package": "xfun", + "Version": "0.42", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "grDevices", + "stats", + "tools" + ], + "Hash": "fd1349170df31f7a10bd98b0189e85af" + }, + "xml2": { + "Package": "xml2", + "Version": "1.3.6", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "cli", + "methods", + "rlang" + ], + "Hash": "1d0336142f4cd25d8d23cd3ba7a8fb61" + }, + "xopen": { + "Package": "xopen", + "Version": "1.0.0", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "processx" + ], + "Hash": "6c85f015dee9cc7710ddd20f86881f58" + }, + "xtable": { + "Package": "xtable", + "Version": "1.8-4", + "Source": "Repository", + "Repository": "RSPM", + "Requirements": [ + "R", + "stats", + "utils" + ], + "Hash": "b8acdf8af494d9ec19ccb2481a9b11c2" + }, + "yaml": { + "Package": "yaml", + "Version": "2.3.8", + "Source": "Repository", + "Repository": "RSPM", + "Hash": "29240487a071f535f5e5d5a323b7afbd" + }, + "zip": { + "Package": "zip", + "Version": "2.3.1", + "Source": "Repository", + "Repository": "RSPM", + "Hash": "fcc4bd8e6da2d2011eb64a5e5cc685ab" + }, + "zoo": { + "Package": "zoo", + "Version": "1.8-12", "Source": "Repository", "Repository": "RSPM", "Requirements": [ "R", "grDevices", 
"graphics", - "stats" + "lattice", + "stats", + "utils" ], - "Hash": "d77c6f74be05c33164e33fbc85540cae" + "Hash": "5c715954112b45499fb1dadc6ee6ee3e" } } } diff --git a/renv/.dockerignore b/renv/.dockerignore new file mode 100644 index 0000000..0ec0cbb --- /dev/null +++ b/renv/.dockerignore @@ -0,0 +1,7 @@ +library/ +local/ +cellar/ +lock/ +python/ +sandbox/ +staging/ diff --git a/renv/activate.R b/renv/activate.R index cc742fc..9b2e7f1 100644 --- a/renv/activate.R +++ b/renv/activate.R @@ -2,12 +2,27 @@ local({ # the requested version of renv - version <- "1.0.0" + version <- "1.0.5" attr(version, "sha") <- NULL # the project directory project <- getwd() + # use start-up diagnostics if enabled + diagnostics <- Sys.getenv("RENV_STARTUP_DIAGNOSTICS", unset = "FALSE") + if (diagnostics) { + start <- Sys.time() + profile <- tempfile("renv-startup-", fileext = ".Rprof") + utils::Rprof(profile) + on.exit({ + utils::Rprof(NULL) + elapsed <- signif(difftime(Sys.time(), start, units = "auto"), digits = 2L) + writeLines(sprintf("- renv took %s to run the autoloader.", format(elapsed))) + writeLines(sprintf("- Profile: %s", profile)) + print(utils::summaryRprof(profile)) + }, add = TRUE) + } + # figure out whether the autoloader is enabled enabled <- local({ @@ -16,6 +31,14 @@ local({ if (!is.null(override)) return(override) + # if we're being run in a context where R_LIBS is already set, + # don't load -- presumably we're being run as a sub-process and + # the parent process has already set up library paths for us + rcmd <- Sys.getenv("R_CMD", unset = NA) + rlibs <- Sys.getenv("R_LIBS", unset = NA) + if (!is.na(rlibs) && !is.na(rcmd)) + return(FALSE) + # next, check environment variables # TODO: prefer using the configuration one in the future envvars <- c( @@ -35,9 +58,22 @@ local({ }) - if (!enabled) + # bail if we're not enabled + if (!enabled) { + + # if we're not enabled, we might still need to manually load + # the user profile here + profile <- Sys.getenv("R_PROFILE_USER", unset = "~/.Rprofile") + if (file.exists(profile)) { + cfg <- Sys.getenv("RENV_CONFIG_USER_PROFILE", unset = "TRUE") + if (tolower(cfg) %in% c("true", "t", "1")) + sys.source(profile, envir = globalenv()) + } + return(FALSE) + } + # avoid recursion if (identical(getOption("renv.autoloader.running"), TRUE)) { warning("ignoring recursive attempt to run renv autoloader") @@ -504,7 +540,7 @@ local({ # open the bundle for reading # We use gzcon for everything because (from ?gzcon) - # > Reading from a connection which does not supply a ‘gzip’ magic + # > Reading from a connection which does not supply a 'gzip' magic # > header is equivalent to reading from the original connection conn <- gzcon(file(bundle, open = "rb", raw = TRUE)) on.exit(close(conn)) @@ -767,10 +803,12 @@ local({ renv_bootstrap_validate_version <- function(version, description = NULL) { # resolve description file - description <- description %||% { - path <- getNamespaceInfo("renv", "path") - packageDescription("renv", lib.loc = dirname(path)) - } + # + # avoid passing lib.loc to `packageDescription()` below, since R will + # use the loaded version of the package by default anyhow. 
note that + # this function should only be called after 'renv' is loaded + # https://github.com/rstudio/renv/issues/1625 + description <- description %||% packageDescription("renv") # check whether requested version 'version' matches loaded version of renv sha <- attr(version, "sha", exact = TRUE) @@ -841,7 +879,7 @@ local({ hooks <- getHook("renv::autoload") for (hook in hooks) if (is.function(hook)) - tryCatch(hook(), error = warning) + tryCatch(hook(), error = warnify) # load the project renv::load(project) @@ -982,10 +1020,15 @@ local({ } - renv_bootstrap_version_friendly <- function(version, sha = NULL) { + renv_bootstrap_version_friendly <- function(version, shafmt = NULL, sha = NULL) { sha <- sha %||% attr(version, "sha", exact = TRUE) - parts <- c(version, sprintf("[sha: %s]", substring(sha, 1L, 7L))) - paste(parts, collapse = " ") + parts <- c(version, sprintf(shafmt %||% " [sha: %s]", substring(sha, 1L, 7L))) + paste(parts, collapse = "") + } + + renv_bootstrap_exec <- function(project, libpath, version) { + if (!renv_bootstrap_load(project, libpath, version)) + renv_bootstrap_run(version, libpath) } renv_bootstrap_run <- function(version, libpath) { @@ -1012,11 +1055,6 @@ local({ } - - renv_bootstrap_in_rstudio <- function() { - commandArgs()[[1]] == "RStudio" - } - renv_json_read <- function(file = NULL, text = NULL) { jlerr <- NULL @@ -1024,7 +1062,7 @@ local({ # if jsonlite is loaded, use that instead if ("jsonlite" %in% loadedNamespaces()) { - json <- catch(renv_json_read_jsonlite(file, text)) + json <- tryCatch(renv_json_read_jsonlite(file, text), error = identity) if (!inherits(json, "error")) return(json) @@ -1033,7 +1071,7 @@ local({ } # otherwise, fall back to the default JSON reader - json <- catch(renv_json_read_default(file, text)) + json <- tryCatch(renv_json_read_default(file, text), error = identity) if (!inherits(json, "error")) return(json) @@ -1046,14 +1084,14 @@ local({ } renv_json_read_jsonlite <- function(file = NULL, text = NULL) { - text <- paste(text %||% read(file), collapse = "\n") + text <- paste(text %||% readLines(file, warn = FALSE), collapse = "\n") jsonlite::fromJSON(txt = text, simplifyVector = FALSE) } renv_json_read_default <- function(file = NULL, text = NULL) { # find strings in the JSON - text <- paste(text %||% read(file), collapse = "\n") + text <- paste(text %||% readLines(file, warn = FALSE), collapse = "\n") pattern <- '["](?:(?:\\\\.)|(?:[^"\\\\]))*?["]' locs <- gregexpr(pattern, text, perl = TRUE)[[1]] @@ -1101,14 +1139,14 @@ local({ map <- as.list(map) # remap strings in object - remapped <- renv_json_remap(json, map) + remapped <- renv_json_read_remap(json, map) # evaluate eval(remapped, envir = baseenv()) } - renv_json_remap <- function(json, map) { + renv_json_read_remap <- function(json, map) { # fix names if (!is.null(names(json))) { @@ -1135,7 +1173,7 @@ local({ # recurse if (is.recursive(json)) { for (i in seq_along(json)) { - json[i] <- list(renv_json_remap(json[[i]], map)) + json[i] <- list(renv_json_read_remap(json[[i]], map)) } } @@ -1155,26 +1193,8 @@ local({ # construct full libpath libpath <- file.path(root, prefix) - # attempt to load - if (renv_bootstrap_load(project, libpath, version)) - return(TRUE) - - if (renv_bootstrap_in_rstudio()) { - setHook("rstudio.sessionInit", function(...) 
{ - renv_bootstrap_run(version, libpath) - - # Work around buglet in RStudio if hook uses readline - tryCatch( - { - tools <- as.environment("tools:rstudio") - tools$.rs.api.sendToConsole("", echo = FALSE, focus = FALSE) - }, - error = function(cnd) {} - ) - }) - } else { - renv_bootstrap_run(version, libpath) - } + # run bootstrap code + renv_bootstrap_exec(project, libpath, version) invisible() diff --git a/run_psql.sh b/run_psql.sh new file mode 100755 index 0000000..c730a83 --- /dev/null +++ b/run_psql.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +set -e + +export PGPASSWORD="$POSTGRES_PASSWORD" + +psql --host "$POSTGRES_HOST" \ + --port "${POSTGRES_PORT:-5432}" \ + --username "$POSTGRES_USER" \ + --dbname "$POSTGRES_DB" \ + "$@" diff --git a/run_tests.sh b/run_tests.sh new file mode 100644 index 0000000..39ef2f9 --- /dev/null +++ b/run_tests.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +set -e +echo "Running tests" + +R --quiet --no-save -e "devtools::load_all(); testthat::test_package('unbiased')" diff --git a/run_tests_with_coverage.sh b/run_tests_with_coverage.sh new file mode 100644 index 0000000..f2d272d --- /dev/null +++ b/run_tests_with_coverage.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +set -e + +echo "Running tests" + +R --quiet --no-save -e "devtools::load_all(); covr::package_coverage()" diff --git a/start_unbiased_api.sh b/start_unbiased_api.sh new file mode 100755 index 0000000..991cccc --- /dev/null +++ b/start_unbiased_api.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +set -e + +echo "Running unbiased" + +R --quiet --no-save -e "devtools::load_all(); unbiased:::run_unbiased()" diff --git a/tests/testthat.R b/tests/testthat.R index 35c4c98..ac6d738 100644 --- a/tests/testthat.R +++ b/tests/testthat.R @@ -7,7 +7,6 @@ # * https://testthat.r-lib.org/articles/special-files.html library(testthat) -library(checkmate) -library(httr2) +library(unbiased) -test_dir(fs::path("tests", "testthat")) +test_check("unbiased") diff --git a/tests/testthat/.gitignore b/tests/testthat/.gitignore new file mode 100644 index 0000000..6a5a9b5 --- /dev/null +++ b/tests/testthat/.gitignore @@ -0,0 +1 @@ +testthat-problems.rds \ No newline at end of file diff --git a/tests/testthat/audit-log-test-helpers.R b/tests/testthat/audit-log-test-helpers.R new file mode 100644 index 0000000..f7b2e54 --- /dev/null +++ b/tests/testthat/audit-log-test-helpers.R @@ -0,0 +1,64 @@ +#' Assert Events Logged in Audit Trail +#' +#' This function checks if the expected events have been logged in the 'audit_log' table in the database. +#' This function should be used at the beginning of a test to ensure that the expected events are logged. +#' @param events A vector of expected event types that should be logged, in order +#' +#' @return This function does not return a value. It throws an error if the assertions fail. 
+#' +#' @examples +#' \dontrun{ +#' assert_events_logged(c("event1", "event2")) +#' } +assert_audit_trail_for_test <- function(events = list(), env = parent.frame()) { + # Get count of events logged from audit_log table in database + pool <- get("db_connection_pool", envir = .GlobalEnv) + conn <- pool::localCheckout(pool) + + event_count <- DBI::dbGetQuery( + conn, + "SELECT COUNT(*) FROM audit_log" + )$count + + withr::defer( + { + # gen new count + new_event_count <- DBI::dbGetQuery( + conn, + "SELECT COUNT(*) FROM audit_log" + )$count + + n <- length(events) + + # assert that the count has increased by number of events + testthat::expect_identical( + new_event_count, + event_count + n, + info = glue::glue("Expected {n} events to be logged") + ) + + if (n > 0) { + # get the last n events + last_n_events <- DBI::dbGetQuery( + conn, + glue::glue_sql( + "SELECT * FROM audit_log ORDER BY created_at DESC LIMIT {n};", + .con = conn + ) + ) + + event_types <- last_n_events |> + dplyr::pull("event_type") |> + rev() + + # assert that the last n events are the expected events + testthat::expect_equal( + event_types, + events, + info = "Expected events to be logged" + ) + } + }, + env + ) +} diff --git a/tests/testthat/fixtures/example_audit_logs.yml b/tests/testthat/fixtures/example_audit_logs.yml new file mode 100644 index 0000000..5b28b3c --- /dev/null +++ b/tests/testthat/fixtures/example_audit_logs.yml @@ -0,0 +1,87 @@ +study: + - identifier: 'TEST' + name: 'Test Study' + method: 'minimisation_pocock' + parameters: '{}' + - identifier: 'TEST2' + name: 'Test Study 2' + method: 'minimisation_pocock' + parameters: '{}' + - identifier: 'TEST3' + name: 'Test Study 3' + method: 'minimisation_pocock' + parameters: '{}' + +audit_log: + - id: "c12d29e7-1b44-4cb6-a9c1-1f427fe70001" + created_at: "2022-02-16T10:27:53Z" + event_type: "example_event" + request_id: "427ac2db-166d-4236-b040-94213f1b0001" + study_id: 1 + endpoint_url: "/api/example" + request_method: "GET" + request_body: '{"key1": "value1", "key2": "value2"}' + response_code: 200 + response_body: '{"key1": "value1", "key2": "value2"}' + ip_address: "8.8.8.8" + user_agent: "Mozilla" + - id: "c12d29e7-1b44-4cb6-a9c1-1f427fe70002" + created_at: "2022-02-16T10:27:53Z" + event_type: "example_event" + request_id: "427ac2db-166d-4236-b040-94213f1b0002" + study_id: 2 + endpoint_url: "/api/example" + request_method: "GET" + request_body: '{"key1": "value1", "key2": "value2"}' + response_code: 200 + response_body: '{"key1": "value1", "key2": "value2"}' + ip_address: "8.8.8.8" + user_agent: "Mozilla" + - id: "c12d29e7-1b44-4cb6-a9c1-1f427fe70003" + created_at: "2022-02-16T10:27:53Z" + event_type: "example_event" + request_id: "427ac2db-166d-4236-b040-94213f1b0003" + study_id: 2 + endpoint_url: "/api/example" + request_method: "GET" + request_body: '{"key1": "value1", "key2": "value2"}' + response_code: 200 + response_body: '{"key1": "value1", "key2": "value2"}' + ip_address: "8.8.8.8" + user_agent: "Mozilla" + - id: "c12d29e7-1b44-4cb6-a9c1-1f427fe70004" + created_at: "2023-02-16T10:27:53Z" + event_type: "example_event" + request_id: "427ac2db-166d-4236-b040-94213f1b0004" + study_id: 2 + endpoint_url: "/api/example" + request_method: "GET" + request_body: '{"key1": "value1", "key2": "value2"}' + response_code: 200 + response_body: '{"key1": "value1", "key2": "value2"}' + ip_address: "8.8.8.8" + user_agent: "Mozilla" + - id: "c12d29e7-1b44-4cb6-a9c1-1f427fe70005" + created_at: "2022-02-16T10:27:54Z" + event_type: "example_event" + request_id: 
"427ac2db-166d-4236-b040-94213f1b0004" + study_id: 2 + endpoint_url: "/api/example" + request_method: "GET" + request_body: '{"key1": "value1", "key2": "value2"}' + response_code: 200 + response_body: '{"key1": "value1", "key2": "value2"}' + ip_address: "8.8.8.8" + user_agent: "Mozilla" + - id: "c12d29e7-1b44-4cb6-a9c1-1f427fe70006" + created_at: "2022-02-16T10:27:53Z" + event_type: "example_event" + request_id: "427ac2db-166d-4236-b040-94213f1b0006" + study_id: 3 + endpoint_url: "/api/example" + request_method: "GET" + request_body: '{"key1": "value1", "key2": "value2"}' + response_code: 200 + response_body: '{"key1": "value1", "key2": "value2"}' + ip_address: "8.8.8.8" + user_agent: "Mozilla" diff --git a/tests/testthat/fixtures/example_db.yml b/tests/testthat/fixtures/example_db.yml new file mode 100644 index 0000000..45b054e --- /dev/null +++ b/tests/testthat/fixtures/example_db.yml @@ -0,0 +1,99 @@ +study: + - identifier: 'TEST' + name: 'Test Study' + method: 'minimisation_pocock' + parameters: '{"method": "var", "p": 0.85, "weights": {"gender": 1}}' + # Waring: id is set automatically by the database + # do not set it manually because sequences will be out of sync + # and you will get errors + # id: 1 + - identifier: 'TEST2' + name: 'Test Study 2' + method: 'minimisation_pocock' + parameters: '{"method": "var", "p": 0.85, "weights": {"gender": 1}}' + # id: 2 + +arm: + - study_id: 1 + name: 'placebo' + ratio: 2 + # id: 1 + - study_id: 1 + name: 'active' + ratio: 1 + # id: 2 + - study_id: 2 + name: 'placebo' + ratio: 2 + # id: 3 + - study_id: 2 + name: 'active' + ratio: 1 + # id: 4 + +stratum: + - study_id: 1 + name: 'gender' + value_type: 'factor' + # id: 1 + - study_id: 2 + name: 'gender' + value_type: 'factor' + # id: 2 + +factor_constraint: + - stratum_id: 1 + value: 'F' + - stratum_id: 1 + value: 'M' + - stratum_id: 2 + value: 'F' + - stratum_id: 2 + value: 'M' + +patient: + - study_id: 1 + arm_id: 1 + used: true + # id: 1 + - study_id: 1 + arm_id: 2 + used: true + # id: 2 + - study_id: 1 + arm_id: 2 + used: true + # id: 3 + - study_id: 1 + arm_id: 1 + used: true + # id: 4 + - study_id: 2 + arm_id: 3 + used: true + # id: 5 + - study_id: 2 + arm_id: 4 + used: true + # id: 6 + +patient_stratum: + - patient_id: 1 + stratum_id: 1 + fct_value: 'F' + - patient_id: 2 + stratum_id: 1 + fct_value: 'M' + - patient_id: 3 + stratum_id: 1 + fct_value: 'F' + - patient_id: 4 + stratum_id: 1 + fct_value: 'M' + - patient_id: 5 + stratum_id: 2 + fct_value: 'M' + - patient_id: 6 + stratum_id: 2 + fct_value: 'F' + diff --git a/tests/testthat/setup-api.R b/tests/testthat/setup-api.R deleted file mode 100644 index ec4d80b..0000000 --- a/tests/testthat/setup-api.R +++ /dev/null @@ -1,28 +0,0 @@ -api_url <- "http://api:3838" - -if (!isTRUE(as.logical(Sys.getenv("CI")))) { - withr::local_envvar( - # Extract current SHA and set it as a temporary env var - list(GITHUB_SHA = system("git rev-parse HEAD", intern = TRUE)) - ) - - # Overwrite API URL if not on CI - api_url <- "http://localhost:3838" - api_path <- tempdir() - - # Start the API - api <- callr::r_bg(\(path) { - # 1. Set path to `path` - # 2. 
Build a plumber API - plumber::plumb(dir = fs::path("..", "..", "api")) |> - plumber::pr_run(port = 3838) - }, args = list(path = api_path)) - - # Wait until started - while (!api$is_alive()) { - Sys.sleep(.2) - } - - # Close API upon exiting - withr::defer({ api$kill() }, teardown_env()) -} diff --git a/tests/testthat/setup-testing-environment.R b/tests/testthat/setup-testing-environment.R new file mode 100644 index 0000000..dd4d790 --- /dev/null +++ b/tests/testthat/setup-testing-environment.R @@ -0,0 +1,288 @@ +library(checkmate) +library(dplyr) +library(dbplyr) +library(httr2) + +run_psql <- function(statement) { + withr::local_envvar( + PGPASSWORD = Sys.getenv("POSTGRES_PASSWORD") + ) + + # Construct the command + command <- paste( + "psql", + "--host", shQuote(Sys.getenv("POSTGRES_HOST")), + "--port", shQuote(Sys.getenv("POSTGRES_PORT")), + "--username", shQuote(Sys.getenv("POSTGRES_USER")), + "--dbname", shQuote(Sys.getenv("POSTGRES_DB")), + "--command", shQuote(statement), + sep = " " + ) + + system(command, intern = TRUE) +} + +run_migrations <- function() { + # Construct the connection string + user <- Sys.getenv("POSTGRES_USER") + password <- Sys.getenv("POSTGRES_PASSWORD") + host <- Sys.getenv("POSTGRES_HOST") + port <- Sys.getenv("POSTGRES_PORT", "5432") + db <- Sys.getenv("POSTGRES_DB") + + print( + glue::glue( + "Running migrations on database {db} at {host}:{port}" + ) + ) + + migrations_path <- glue::glue( + "{root_repo_directory}/inst/db/migrations" + ) + if (!dir.exists(migrations_path)) { + # If the migrations directory does not exist + # we will assume that the package is installed + # and inst directory content is copied to the root directory + migrations_path <- glue::glue( + "{root_repo_directory}/db/migrations" + ) + } + + db_connection_string <- + glue::glue( + "postgres://{user}:{password}@{host}:{port}/{db}?sslmode=disable" + ) + command <- "migrate" + args <- c( + "-database", + db_connection_string, + "-path", + migrations_path, + "up" + ) + + system2(command, args) +} + +create_database <- function(db_name) { + # make sure we are not creating the database that we are using for connection + assert( + db_name != Sys.getenv("POSTGRES_DB"), + "Cannot create the database that is used for connection" + ) + print( + glue::glue( + "Creating database {db_name}" + ) + ) + run_psql( + glue::glue( + "CREATE DATABASE {db_name}" + ) + ) +} + +drop_database <- function(db_name) { + # make sure we are not dropping the database that we are using for connection + assert( + db_name != Sys.getenv("POSTGRES_DB"), + "Cannot drop the database that is used for connection" + ) + # first, terminate all connections to the database + print( + glue::glue( + "Terminating all connections to the database {db_name}" + ) + ) + run_psql( + glue::glue( + "SELECT pg_terminate_backend(pg_stat_activity.pid) + FROM pg_stat_activity + WHERE pg_stat_activity.datname = '{db_name}' + AND pid <> pg_backend_pid();" + ) + ) + print( + glue::glue( + "Dropping database {db_name}" + ) + ) + run_psql( + glue::glue( + "DROP DATABASE {db_name}" + ) + ) +} + +setup_test_db_connection_pool <- function(envir = parent.frame()) { + # We will create a connection pool to the database + # and store it in the global environment + # so that we can use it in the tests + # without having to pass it around + db_connection_pool <- unbiased:::create_db_connection_pool() + assign("db_connection_pool", db_connection_pool, envir = globalenv()) + withr::defer( + { + print("Closing database connection pool") + 
db_connection_pool$close() + assign("db_connection_pool", NULL, envir = globalenv()) + }, + envir = envir + ) +} + +# Make sure to disable Sentry during testing +withr::local_envvar( + SENTRY_DSN = NULL +) + +# We will always run the API on the localhost +# and on a random port +api_host <- "127.0.0.1" +api_port <- httpuv::randomPort() + +api_url <- glue::glue("http://{api_host}:{api_port}") +print(glue::glue("API URL: {api_url}")) + +# make sure we are in the root directory of the repository +# this is necessary to run the database migrations +# as well as to run the plumber API +current_working_dir <- getwd() +root_repo_directory <- + glue::glue(current_working_dir, "/../../") |> + normalizePath() +setwd(root_repo_directory) + +# append __test suffix to the database name +# we will use this as a convention to create a test database +# we have to avoid messing with the original database +db_name <- Sys.getenv("POSTGRES_DB") +db_name_test <- glue::glue("{db_name}__test") + +# create the test database using connection with the original database +create_database(db_name_test) + +# now that the database is created, we can set the environment variable +# to the test database name +# we will be working on the test database from now on +withr::local_envvar( + list( + POSTGRES_DB = db_name_test + ) +) + +# drop the test database upon exiting +withr::defer( + { + # make sure db_name_test ends with __test before dropping it + assert( + stringr::str_detect(db_name_test, "__test$"), + "db_name_test should end with __test" + ) + setwd(root_repo_directory) + drop_database(db_name_test) + }, + teardown_env() +) + +# run migrations +exit_code <- run_migrations() +if (exit_code != 0) { + stop( + glue::glue( + "Failed to run database migrations", + "exit code: {exit_code}" + ) + ) +} + +# We will run the unbiased API in the background +# and wait until it starts +# We are setting the environment variables +# so that the unbiased API will start an HTTP server +# on the specified host and port without coliision +# with the main API that might be running on the same machine +withr::local_envvar( + list( + UNBIASED_HOST = api_host, + UNBIASED_PORT = api_port + ) +) + +# Mock GITHUB_SHA as valid sha if it is not set +github_sha <- Sys.getenv( + "GITHUB_SHA", + "6e21b5b689cc9737ba0d24147ed4b634c7146a28" +) +if (github_sha == "") { + github_sha <- "6e21b5b689cc9737ba0d24147ed4b634c7146a28" +} +withr::local_envvar( + list( + GITHUB_SHA = github_sha + ) +) + +stdout_file <- withr::local_tempfile( + fileext = ".log", + .local_envir = teardown_env() +) + +stderr_file <- withr::local_tempfile( + fileext = ".log", + .local_envir = teardown_env() +) + +plumber_process <- callr::r_bg( + \() { + if (!requireNamespace("unbiased", quietly = TRUE)) { + # There is no installed unbiased package + # In that case, we will assume that we are running + # on the development machine + # and we will load the package using devtools + print("Installing unbiased package using devtools") + devtools::load_all() + } + + unbiased:::run_unbiased() + }, + supervise = TRUE, + stdout = stdout_file, + stderr = stderr_file, +) + +withr::defer( + { + print("Server STDOUT:") + lines <- readLines(stdout_file) + writeLines(lines) + print("Server STDERR:") + lines <- readLines(stderr_file) + writeLines(lines) + print("Sending SIGINT to plumber process") + plumber_process$interrupt() + + print("Waiting for plumber process to exit") + plumber_process$wait() + }, + teardown_env() +) + +# go back to the original working directory +# that is used by the 
testthat package +setwd(current_working_dir) + +setup_test_db_connection_pool(envir = teardown_env()) + +# Retry a request until the API starts +print("Waiting for the API to start...") +request(api_url) |> + # Endpoint that should be always available + req_url_path("meta", "sha") |> + req_method("GET") |> + req_retry( + max_seconds = 30, + backoff = \(x) 1 + ) |> + req_perform() +print("API started, running tests...") diff --git a/tests/testthat/test-DB-0.R b/tests/testthat/test-DB-0.R new file mode 100644 index 0000000..1d81bee --- /dev/null +++ b/tests/testthat/test-DB-0.R @@ -0,0 +1,30 @@ +# Named with '0' to make sure that this one runs first because it validates +# basic properties of the database + +source("./test-helpers.R") + +# Setup constants ---- + + +# Test values ---- +test_that("database contains base tables", { + conn <- pool::localCheckout( + get("db_connection_pool", envir = globalenv()) + ) + with_db_fixtures("fixtures/example_db.yml") + expect_contains( + DBI::dbListTables(conn), + c(versioned_tables, nonversioned_tables) + ) +}) + +test_that("database contains history tables", { + conn <- pool::localCheckout( + get("db_connection_pool", envir = globalenv()) + ) + with_db_fixtures("fixtures/example_db.yml") + expect_contains( + DBI::dbListTables(conn), + glue::glue("{versioned_tables}_history") + ) +}) diff --git a/tests/testthat/test-DB-study.R b/tests/testthat/test-DB-study.R new file mode 100644 index 0000000..9e4ee15 --- /dev/null +++ b/tests/testthat/test-DB-study.R @@ -0,0 +1,212 @@ +source("./test-helpers.R") + +pool <- get("db_connection_pool", envir = globalenv()) + +test_that("it is enough to provide a name, an identifier, and a method id", { + conn <- pool::localCheckout(pool) + with_db_fixtures("fixtures/example_db.yml") + testthat::expect_no_error({ + dplyr::tbl(conn, "study") |> + dplyr::rows_append( + tibble::tibble( + identifier = "FINE", + name = "Correctly working study", + method = "minimisation_pocock" + ), + copy = TRUE, in_place = TRUE + ) + }) +}) + +# first study id is 1 +new_study_id <- as.integer(1) + +test_that("deleting archivizes a study", { + conn <- pool::localCheckout(pool) + with_db_fixtures("fixtures/example_db.yml") + testthat::expect_no_error({ + dplyr::tbl(conn, "study") |> + dplyr::rows_delete( + tibble::tibble(id = new_study_id), + copy = TRUE, in_place = TRUE, unmatched = "ignore" + ) + }) + + testthat::expect_identical( + dplyr::tbl(conn, "study_history") |> + dplyr::filter(id == new_study_id) |> + dplyr::select(-parameters, -sys_period, -timestamp) |> + dplyr::collect(), + tibble::tibble( + id = new_study_id, + identifier = "TEST", + name = "Test Study", + method = "minimisation_pocock" + ) + ) +}) + +test_that("can't push arm with negative ratio", { + conn <- pool::localCheckout(pool) + with_db_fixtures("fixtures/example_db.yml") + testthat::expect_error( + { + dplyr::tbl(conn, "arm") |> + dplyr::rows_append( + tibble::tibble( + study_id = 1, + name = "Exception-throwing arm", + ratio = -1 + ), + copy = TRUE, in_place = TRUE + ) + }, + regexp = "violates check constraint" + ) +}) + +test_that("can't push stratum other than factor or numeric", { + conn <- pool::localCheckout(pool) + with_db_fixtures("fixtures/example_db.yml") + testthat::expect_error( + { + tbl(conn, "stratum") |> + rows_append( + tibble( + study_id = 1, + name = "failing stratum", + value_type = "array" + ), + copy = TRUE, in_place = TRUE + ) + }, + regexp = "violates check constraint" + ) +}) + +test_that("can't push stratum level outside of defined levels", { 
+ conn <- pool::localCheckout(pool) + with_db_fixtures("fixtures/example_db.yml") + # create a new patient + return <- + testthat::expect_no_error({ + dplyr::tbl(conn, "patient") |> + dplyr::rows_append( + tibble::tibble( + study_id = 1, + arm_id = 1, + used = TRUE + ), + copy = TRUE, in_place = TRUE, returning = id + ) |> + dbplyr::get_returned_rows() + }) + + added_patient_id <- return$id + + testthat::expect_error( + { + dplyr::tbl(conn, "patient_stratum") |> + dplyr::rows_append( + tibble::tibble( + patient_id = added_patient_id, + stratum_id = 1, + fct_value = "Female" + ), + copy = TRUE, in_place = TRUE + ) + }, + regexp = "Factor value not specified as allowed" + ) + + # add legal value + testthat::expect_no_error({ + dplyr::tbl(conn, "patient_stratum") |> + dplyr::rows_append( + tibble::tibble( + patient_id = added_patient_id, + stratum_id = 1, + fct_value = "F" + ), + copy = TRUE, in_place = TRUE + ) + }) +}) + +test_that("numerical constraints are enforced", { + conn <- pool::localCheckout(pool) + with_db_fixtures("fixtures/example_db.yml") + added_patient_id <- as.integer(1) + return <- + testthat::expect_no_error({ + dplyr::tbl(conn, "stratum") |> + dplyr::rows_append( + tibble::tibble( + study_id = 1, + name = "age", + value_type = "numeric" + ), + copy = TRUE, in_place = TRUE, returning = id + ) |> + dbplyr::get_returned_rows() + }) + + added_stratum_id <- return$id + + testthat::expect_no_error({ + dplyr::tbl(conn, "numeric_constraint") |> + dplyr::rows_append( + tibble::tibble( + stratum_id = added_stratum_id, + min_value = 18, + max_value = 64 + ), + copy = TRUE, in_place = TRUE + ) + }) + + # and you can't add an illegal value + testthat::expect_error( + { + dplyr::tbl(conn, "patient_stratum") |> + dplyr::rows_append( + tibble::tibble( + patient_id = added_patient_id, + stratum_id = added_stratum_id, + num_value = 16 + ), + copy = TRUE, in_place = TRUE + ) + }, + regexp = "New value is lower than minimum" + ) + + # you can add valid value + testthat::expect_no_error({ + dplyr::tbl(conn, "patient_stratum") |> + dplyr::rows_append( + dplyr::tibble( + patient_id = added_patient_id, + stratum_id = added_stratum_id, + num_value = 23 + ), + copy = TRUE, in_place = TRUE + ) + }) + + # but you cannot add two values for one patient one stratum + testthat::expect_error( + { + dplyr::tbl(conn, "patient_stratum") |> + dplyr::rows_append( + tibble::tibble( + patient_id = added_patient_id, + stratum_id = added_stratum_id, + num_value = 24 + ), + copy = TRUE, in_place = TRUE + ) + }, + regexp = "duplicate key value violates unique constraint" + ) +}) diff --git a/tests/testthat/test-E2E-get-study.R b/tests/testthat/test-E2E-get-study.R new file mode 100644 index 0000000..dbb42de --- /dev/null +++ b/tests/testthat/test-E2E-get-study.R @@ -0,0 +1,172 @@ +test_that("correct request to reads studies with the structure of the returned result", { + source("./test-helpers.R") + source("./audit-log-test-helpers.R") + + conn <- pool::localCheckout( + get("db_connection_pool", envir = globalenv()) + ) + with_db_fixtures("fixtures/example_db.yml") + + # this endpoint should not be logged + assert_audit_trail_for_test(c()) + + response <- request(api_url) |> + req_url_path("study", "") |> + req_method("GET") |> + req_perform() + + response_body <- + response |> + resp_body_json() + + testthat::expect_equal(response$status_code, 200) + + checkmate::expect_names( + names(response_body[[1]]), + identical.to = c("study_id", "identifier", "name", "method", "last_edited") + ) + + checkmate::expect_list( 
+ response_body[[1]], + any.missing = TRUE, + null.ok = FALSE, + len = 5, + type = c("numeric", "character", "character", "character", "character") + ) + + # Compliance of the number of tests + + n_studies <- + dplyr::tbl(db_connection_pool, "study") |> + collect() |> + nrow() + + testthat::expect_equal(length(response_body), n_studies) +}) + +test_that("requests to reads records for chosen study_id with the structure of the returned result", { + response <- request(api_url) |> + req_url_path("study", "minimisation_pocock") |> + req_method("POST") |> + req_body_json( + data = list( + identifier = "ABC-X", + name = "Study ABC-X", + method = "var", + p = 0.85, + arms = list( + "placebo" = 1, + "active" = 1 + ), + covariates = list( + sex = list( + weight = 1, + levels = c("female", "male") + ), + weight = list( + weight = 1, + levels = c("up to 60kg", "61-80 kg", "81 kg or more") + ) + ) + ) + ) |> + req_perform() + + response_body <- + response |> + resp_body_json() + + response_study <- + request(api_url) |> + req_url_path("study", response_body$study$id) |> + req_method("GET") |> + req_perform() + + response_study_body <- + response_study |> + resp_body_json() + + testthat::expect_equal(response$status_code, 200) + + checkmate::expect_names( + names(response_study_body), + identical.to = c("study_id", "name", "randomization_method", "last_edited", "p", "method", "strata", "arms") + ) + + checkmate::expect_list( + response_study_body, + any.missing = TRUE, + null.ok = TRUE, + len = 8, + type = c("numeric", "character", "character", "character", "numeric", "character", "list", "character") + ) + + response_study_id <- + tryCatch( + { + request(api_url) |> + req_url_path("study", response_body$study$id + 1) |> + req_method("GET") |> + req_perform() + }, + error = function(e) e + ) + + testthat::expect_equal(response_study_id$status, 404) +}) + +test_that("correct request to reads randomization list with the structure of the returned result", { + source("./test-helpers.R") + + conn <- pool::localCheckout( + get("db_connection_pool", envir = globalenv()) + ) + + with_db_fixtures("fixtures/example_db.yml") + + response <- + request(api_url) |> + req_url_path("/study/1/randomization_list") |> + req_method("GET") |> + req_perform() + + response_body <- + response |> + resp_body_json() + + testthat::expect_equal(response$status_code, 200) + + checkmate::expect_names( + names(response_body[[1]]), + identical.to = c("patient_id", "arm", "used", "sys_period") + ) + + checkmate::expect_set_equal( + x = response_body |> + dplyr::bind_rows() |> + dplyr::pull(patient_id), + y = c(1, 2, 3, 4) + ) +}) + +test_that("incorrect input study_id to reads randomization list", { + source("./test-helpers.R") + + conn <- pool::localCheckout( + get("db_connection_pool", envir = globalenv()) + ) + with_db_fixtures("fixtures/example_db.yml") + + response <- + tryCatch( + { + request(api_url) |> + req_url_path("study/100/randomization_list") |> + req_method("GET") |> + req_perform() + }, + error = function(e) e + ) + + testthat::expect_equal(response$status, 404) +}) diff --git a/tests/testthat/test-E2E-meta-tag.R b/tests/testthat/test-E2E-meta-tag.R index 048e4d6..eb8fee2 100644 --- a/tests/testthat/test-E2E-meta-tag.R +++ b/tests/testthat/test-E2E-meta-tag.R @@ -2,9 +2,8 @@ test_that("meta tag endpoint returns the SHA", { response <- request(api_url) |> req_url_path("meta", "sha") |> req_method("GET") |> - req_retry(max_tries = 5) |> req_perform() |> resp_body_json() - + expect_string(response, n.chars = 40, pattern = 
"^[0-9a-f]{40}$") }) diff --git a/tests/testthat/test-E2E-simple.R b/tests/testthat/test-E2E-simple.R deleted file mode 100644 index f32b8e6..0000000 --- a/tests/testthat/test-E2E-simple.R +++ /dev/null @@ -1,10 +0,0 @@ -test_that("hello world endpoint returns the message", { - response <- request(api_url) |> - req_url_path("simple", "hello") |> - req_method("GET") |> - req_retry(max_tries = 5) |> - req_perform() |> - resp_body_json() - - expect_identical(response, "Hello TTSI!") -}) diff --git a/tests/testthat/test-E2E-study-minimisation-pocock.R b/tests/testthat/test-E2E-study-minimisation-pocock.R new file mode 100644 index 0000000..65244ab --- /dev/null +++ b/tests/testthat/test-E2E-study-minimisation-pocock.R @@ -0,0 +1,504 @@ +test_that("correct request with the structure of the returned result", { + source("./test-helpers.R") + source("./audit-log-test-helpers.R") + with_db_fixtures("fixtures/example_db.yml") + assert_audit_trail_for_test(c( + "study_create", + "randomize_patient" + )) + response <- request(api_url) |> + req_url_path("study", "minimisation_pocock") |> + req_method("POST") |> + req_body_json( + data = list( + identifier = "ABC-X", + name = "Study ABC-X", + method = "var", + p = 0.85, + arms = list( + "placebo" = 1, + "active" = 1 + ), + covariates = list( + sex = list( + weight = 1, + levels = c("female", "male") + ), + weight = list( + weight = 1, + levels = c("up to 60kg", "61-80 kg", "81 kg or more") + ) + ) + ) + ) |> + req_perform() + + response_body <- + response |> + resp_body_json() + + testthat::expect_equal(response$status_code, 200) + checkmate::expect_number(response_body$study$id, lower = 1) + + response_patient <- request(api_url) |> + req_url_path("study", response_body$study$id, "patient") |> + req_method("POST") |> + req_body_json( + data = list( + current_state = + tibble::tibble( + "sex" = c("female", "male"), + "weight" = c("61-80 kg", "81 kg or more"), + "arm" = c("placebo", "") + ) + ) + ) |> + req_perform() + + response_patient_body <- + response_patient |> + resp_body_json() + + testthat::expect_equal(response$status_code, 200) + checkmate::expect_number(response_patient_body$patient_id, lower = 1) + + # Endpoint Response Structure Test + checkmate::expect_names( + names(response_patient_body), + identical.to = c("patient_id", "arm_id", "arm_name") + ) + + checkmate::expect_list( + response_patient_body, + any.missing = TRUE, + null.ok = FALSE, + len = 3, + type = c("numeric", "numeric", "character") + ) + + checkmate::test_true( + dplyr::tbl(db_connection_pool, "patient") |> + dplyr::slice_max(id) |> + dplyr::collect() |> + dplyr::pull(used), + TRUE + ) +}) + +test_that("request with one covariate at two levels", { + response_cov <- + request(api_url) |> + req_url_path("study", "minimisation_pocock") |> + req_method("POST") |> + req_body_json( + data = list( + identifier = "ABC-X", + name = "Study ABC-X", + method = "var", + p = 0.85, + arms = list( + "placebo" = 1, + "active" = 1 + ), + covariates = list( + sex = list( + weight = 1, + levels = c("female", "male") + ) + ) + ) + ) |> + req_perform() + + response_cov_body <- + response_cov |> + resp_body_json() + + testthat::expect_equal(response_cov$status_code, 200) +}) + +test_that("request with incorrect study id", { + response <- request(api_url) |> + req_url_path("study", "minimisation_pocock") |> + req_method("POST") |> + req_body_json( + data = list( + identifier = "ABC-X", + name = "Study ABC-X", + method = "var", + p = 0.85, + arms = list( + "placebo" = 1, + "active" = 1 + ), + 
covariates = list( + sex = list( + weight = 1, + levels = c("female", "male") + ), + weight = list( + weight = 1, + levels = c("up to 60kg", "61-80 kg", "81 kg or more") + ) + ) + ) + ) |> + req_perform() + + response_body <- + response |> + resp_body_json() + + response_study <- + tryCatch( + { + request(api_url) |> + req_url_path("study", response_body$study$id + 1, "patient") |> + req_method("POST") |> + req_body_json( + data = list( + current_state = + tibble::tibble( + "sex" = c("female", "male"), + "weight" = c("61-80 kg", "81 kg or more"), + "arm" = c("placebo", "") + ) + ) + ) |> + req_perform() + }, + error = function(e) e + ) + + testthat::expect_equal(response_study$status, 404, label = "HTTP status code") +}) + +test_that("request with patient that is assigned an arm at entry", { + response <- request(api_url) |> + req_url_path("study", "minimisation_pocock") |> + req_method("POST") |> + req_body_json( + data = list( + identifier = "ABC-X", + name = "Study ABC-X", + method = "var", + p = 0.85, + arms = list( + "placebo" = 1, + "active" = 1 + ), + covariates = list( + sex = list( + weight = 1, + levels = c("female", "male") + ), + weight = list( + weight = 1, + levels = c("up to 60kg", "61-80 kg", "81 kg or more") + ) + ) + ) + ) |> + req_perform() + + response_body <- + response |> + resp_body_json() + + response_cs_arm <- + tryCatch( + { + request(api_url) |> + req_url_path("study", response_body$study$id, "patient") |> + req_method("POST") |> + req_body_json( + data = list( + current_state = + tibble::tibble( + "sex" = c("female", "male"), + "weight" = c("61-80 kg", "81 kg or more"), + "arm" = c("placebo", "control") + ) + ) + ) |> + req_perform() + }, + error = function(e) e + ) + + testthat::expect_equal( + response_cs_arm$status, 400, + label = "HTTP status code" + ) + + response_cs_records <- + tryCatch( + { + request(api_url) |> + req_url_path("study", response_body$study$id, "patient") |> + req_method("POST") |> + req_body_json( + data = list( + current_state = + tibble::tibble( + "sex" = c("female"), + "weight" = c("61-80 kg"), + "arm" = c("placebo") + ) + ) + ) |> + req_perform() + }, + error = function(e) e + ) + + testthat::expect_equal( + response_cs_records$status, 400, + label = "HTTP status code" + ) + + response_current_state <- + tryCatch( + { + request(api_url) |> + req_url_path("study", response_body$study$id, "patient") |> + req_method("POST") |> + req_body_json( + data = list( + current_state = + tibble::tibble( + "sex" = c("female", "male"), + "weight" = c("61-80 kg", "81 kg or more") + ) + ) + ) |> + req_perform() + }, + error = function(e) e + ) + + testthat::expect_equal( + response_current_state$status, 400, + label = "HTTP status code" + ) +}) + +test_that("request with incorrect number of levels", { + response_cov <- + tryCatch( + { + request(api_url) |> + req_url_path("study", "minimisation_pocock") |> + req_method("POST") |> + req_body_json( + data = list( + identifier = "ABC-X", + name = "Study ABC-X", + method = "var", + p = 0.85, + arms = list( + "placebo" = 1, + "active" = 1 + ), + covariates = list( + sex = list( + weight = 1, + levels = c("female") + ), + weight = list( + weight = 1, + levels = c("up to 60kg", "61-80 kg", "81 kg or more") + ) + ) + ) + ) |> + req_perform() + }, + error = function(e) e + ) + + testthat::expect_equal(response_cov$status, 400) +}) + +test_that("request with incorrect parameter p", { + response_p <- + tryCatch( + { + request(api_url) |> + req_url_path("study", "minimisation_pocock") |> + req_method("POST") |> + 
req_body_json( + data = list( + identifier = "ABC-X", + name = "Study ABC-X", + method = "var", + p = "A", + arms = list( + "placebo" = 1, + "active" = 1 + ), + covariates = list( + sex = list( + weight = 1, + levels = c("female", "male") + ), + weight = list( + weight = 1, + levels = c("up to 60kg", "61-80 kg", "81 kg or more") + ) + ) + ) + ) |> + req_perform() + }, + error = function(e) e + ) + + testthat::expect_equal(response_p$status, 400) +}) + +test_that("request with incorrect arms", { + response_arms <- + tryCatch( + { + request(api_url) |> + req_url_path("study", "minimisation_pocock") |> + req_method("POST") |> + req_body_raw('{ + "identifier": "ABC-X", + "name": "Study ABC-X", + "method": "var", + "p": 0.85, + "arms": { + "placebo": 1, + "placebo": 1 + }, + "covariates": { + "sex": { + "weight": 1, + "levels": ["female", "male"] + }, + "weight": { + "weight": 1, + "levels": ["up to 60kg", "61-80 kg", "81 kg or more"] + } + } + }') |> + req_perform() + }, + error = function(e) e + ) + + testthat::expect_equal(response_arms$status, 400) +}) + +test_that("request with incorrect method", { + response_method <- + tryCatch( + { + request(api_url) |> + req_url_path("study", "minimisation_pocock") |> + req_method("POST") |> + req_body_json( + data = list( + identifier = "ABC-X", + name = "Study ABC-X", + method = 1, + p = 0.85, + arms = list( + "placebo" = 1, + "control" = 1 + ), + covariates = list( + sex = list( + weight = 1, + levels = c("female", "male") + ), + weight = list( + weight = 1, + levels = c("up to 60kg", "61-80 kg", "81 kg or more") + ) + ) + ) + ) |> + req_perform() + }, + error = function(e) e + ) + + testthat::expect_equal(response_method$status, 400) +}) + +test_that("request with incorrect weights", { + response_weights <- + tryCatch( + { + request(api_url) |> + req_url_path("study", "minimisation_pocock") |> + req_method("POST") |> + req_body_json( + data = list( + identifier = "ABC-X", + name = "Study ABC-X", + method = "var", + p = 0.85, + arms = list( + "placebo" = 1, + "control" = 1 + ), + covariates = list( + sex = list( + weight = "1", + levels = c("female", "male") + ), + weight = list( + weight = 1, + levels = c("up to 60kg", "61-80 kg", "81 kg or more") + ) + ) + ) + ) |> + req_perform() + }, + error = function(e) e + ) + + testthat::expect_equal(response_weights$status, 400) +}) + +test_that("request with incorrect ratio", { + response_ratio <- + tryCatch( + { + request(api_url) |> + req_url_path("study", "minimisation_pocock") |> + req_method("POST") |> + req_body_json( + data = list( + identifier = "ABC-X", + name = "Study ABC-X", + method = "var", + p = 0.85, + arms = list( + "placebo" = "1", + "control" = 1 + ), + covariates = list( + sex = list( + weight = 1, + levels = c("female", "male") + ), + weight = list( + weight = 1, + levels = c("up to 60kg", "61-80 kg", "81 kg or more") + ) + ) + ) + ) |> + req_perform() + }, + error = function(e) e + ) + + testthat::expect_equal(response_ratio$status, 400) +}) diff --git a/tests/testthat/test-api-audit-log.R b/tests/testthat/test-api-audit-log.R new file mode 100644 index 0000000..2ae767f --- /dev/null +++ b/tests/testthat/test-api-audit-log.R @@ -0,0 +1,88 @@ +source("./test-helpers.R") +source("./audit-log-test-helpers.R") + +testthat::test_that("audit logs for study are returned correctly from the database", { + with_db_fixtures("fixtures/example_audit_logs.yml") + studies <- c(1, 2, 3) + counts <- c(1, 4, 1) + for (i in 1:3) { + study_id <- studies[i] + count <- counts[i] |> + as.integer() + response 
<- request(api_url) |> + req_url_path("study", study_id, "audit") |> + req_method("GET") |> + req_perform() + + response_body <- + response |> + resp_body_json() + + testthat::expect_identical(response$status_code, 200L) + testthat::expect_identical(length(response_body), count) + + created_at <- response_body |> dplyr::bind_rows() |> dplyr::pull("created_at") + testthat::expect_equal( + created_at, + created_at |> sort() + ) + + if (count > 0) { + body <- response_body[[1]] + testthat::expect_setequal(names(body), c( + "id", + "created_at", + "event_type", + "request_id", + "study_id", + "endpoint_url", + "request_method", + "request_body", + "response_code", + "response_body", + "user_agent", + "ip_address" + )) + + testthat::expect_equal(body$study_id, study_id) + testthat::expect_equal(body$event_type, "example_event") + testthat::expect_equal(body$request_method, "GET") + testthat::expect_equal(body$endpoint_url, "/api/example") + testthat::expect_equal(body$response_code, 200) + testthat::expect_equal(body$request_body, list(key1 = "value1", key2 = "value2")) + testthat::expect_equal(body$response_body, list(key1 = "value1", key2 = "value2")) + } + } +}) + +testthat::test_that("should return 404 when study does not exist", { + with_db_fixtures("fixtures/example_audit_logs.yml") + response <- request(api_url) |> + req_url_path("study", 1111, "audit") |> + req_method("GET") |> + req_error(is_error = \(x) FALSE) |> + req_perform() + + response_body <- + response |> + resp_body_json() + + testthat::expect_equal(response$status_code, 404) + testthat::expect_equal(response_body$error, "Study not found") +}) + +testthat::test_that("should not log audit trail for non-existent endpoint", { + with_db_fixtures("fixtures/example_audit_logs.yml") + assert_audit_trail_for_test(events = c()) + response <- request(api_url) |> + req_url_path("study", 1, "non-existent-endpoint") |> + req_method("GET") |> + req_error(is_error = \(x) FALSE) |> + req_perform() + + response_body <- + response |> + resp_body_json() + + testthat::expect_equal(response$status_code, 404) +}) diff --git a/tests/testthat/test-error-handling.R b/tests/testthat/test-error-handling.R new file mode 100644 index 0000000..d5fd1bf --- /dev/null +++ b/tests/testthat/test-error-handling.R @@ -0,0 +1,91 @@ +testthat::test_that("uses correct environment variables when setting up sentry", { + withr::local_envvar( + c( + SENTRY_DSN = "https://sentry.io/123", + GITHUB_SHA = "abc", + SENTRY_ENVIRONMENT = "production", + SENTRY_RELEASE = "1.0.0" + ) + ) + + testthat::local_mocked_bindings( + configure_sentry = function(dsn, + app_name, + app_version, + environment, + release) { + testthat::expect_equal(dsn, "https://sentry.io/123") + testthat::expect_equal(app_name, "unbiased") + testthat::expect_equal(app_version, "abc") + testthat::expect_equal(environment, "production") + testthat::expect_equal(release, "1.0.0") + }, + .package = "sentryR", + ) + + global_calling_handlers_called <- FALSE + + # mock globalCallingHandlers + testthat::local_mocked_bindings( + globalCallingHandlers = function(error) { + global_calling_handlers_called <<- TRUE + testthat::expect_equal( + unbiased:::global_calling_handler, + error + ) + }, + ) + + unbiased:::setup_sentry() + + testthat::expect_true(global_calling_handlers_called) +}) + +testthat::test_that("skips sentry setup if SENTRY_DSN is not set", { + withr::local_envvar( + c( + SENTRY_DSN = "" + ) + ) + + testthat::local_mocked_bindings( + configure_sentry = function(dsn, + app_name, + app_version, + 
environment, + release) { + # should not be called, so we fail the test + testthat::expect_true(FALSE) + }, + .package = "sentryR", + ) + + was_called <- FALSE + + # mock globalCallingHandlers + testthat::local_mocked_bindings( + globalCallingHandlers = function(error) { + was_called <<- TRUE + }, + ) + + testthat::expect_message(unbiased:::setup_sentry(), "SENTRY_DSN not set, skipping Sentry setup") + testthat::expect_false(was_called) +}) + +testthat::test_that("global_calling_handler captures exception and signals condition", { + error <- simpleError("test error") + + capture_exception_called <- FALSE + + testthat::local_mocked_bindings( + capture_exception = function(error) { + capture_exception_called <<- TRUE + testthat::expect_equal(error, error) + }, + .package = "sentryR", + ) + + testthat::expect_error(unbiased:::global_calling_handler(error)) + testthat::expect_true(capture_exception_called) +}) diff --git a/tests/testthat/test-helpers.R b/tests/testthat/test-helpers.R new file mode 100644 index 0000000..271e501 --- /dev/null +++ b/tests/testthat/test-helpers.R @@ -0,0 +1,61 @@ +versioned_tables <- c( + "study", "arm", "stratum", "factor_constraint", + "numeric_constraint", "patient", "patient_stratum" +) +nonversioned_tables <- c() + +all_tables <- c( + versioned_tables, + nonversioned_tables, + versioned_tables |> paste0("_history") +) + +with_db_fixtures <- function(test_data_path, env = parent.frame()) { + pool <- get("db_connection_pool", envir = .GlobalEnv) + conn <- pool::localCheckout(pool) + + # load test data in yaml format + test_data <- yaml::read_yaml(test_data_path) + + # truncate tables before inserting data + truncate_tables(all_tables) + + for (table_name in names(test_data)) { + # get table data + table_data <- test_data[table_name] |> dplyr::bind_rows() + + DBI::dbWriteTable( + conn, + table_name, + table_data, + append = TRUE, + row.names = FALSE + ) + } + + withr::defer( + { + truncate_tables(all_tables) + }, + env + ) +} + +truncate_tables <- function(tables) { + pool <- get("db_connection_pool", envir = .GlobalEnv) + conn <- pool::localCheckout(pool) + DBI::dbExecute( + "SET client_min_messages TO WARNING;", + conn = conn + ) + tables |> + rev() |> + purrr::walk( + \(table_name) { + glue::glue_sql( + "TRUNCATE TABLE {`table_name`} RESTART IDENTITY CASCADE;", + .con = conn + ) |> DBI::dbExecute(conn = conn) + } + ) +} diff --git a/tests/testthat/test-malformed-requests.R b/tests/testthat/test-malformed-requests.R new file mode 100644 index 0000000..22e279c --- /dev/null +++ b/tests/testthat/test-malformed-requests.R @@ -0,0 +1,17 @@ +source("./test-helpers.R") +source("./audit-log-test-helpers.R") + +testthat::test_that("should handle malformed request correctly", { + with_db_fixtures("fixtures/example_audit_logs.yml") + assert_audit_trail_for_test(events = c("malformed_request")) + malformed_json <- "test { test }" + response <- + request(api_url) |> + req_url_path("study") |> + req_method("POST") |> + req_error(is_error = \(x) FALSE) |> + req_body_raw(malformed_json) |> # <--- Malformed request + req_perform() + + testthat::expect_equal(response$status_code, 400) +}) diff --git a/tests/testthat/test-randomize-minimisation-pocock.R b/tests/testthat/test-randomize-minimisation-pocock.R new file mode 100644 index 0000000..cce2b06 --- /dev/null +++ b/tests/testthat/test-randomize-minimisation-pocock.R @@ -0,0 +1,188 @@ +set.seed(seed = "345345") +n_at_the_moment <- 10 +arms <- c("control", "active low", "active high") +sex <- sample(c("F", "M"), + 
n_at_the_moment + 1, + replace = TRUE, + prob = c(0.4, 0.6) +) +diabetes <- + sample(c("diabetes", "no diabetes"), + n_at_the_moment + 1, + replace = TRUE, + prob = c(0.2, 0.8) + ) +arm <- + sample(arms, + n_at_the_moment, + replace = TRUE, + prob = c(0.4, 0.4, 0.2) + ) |> + c("") +covar_df <- tibble::tibble(sex, diabetes, arm) + +test_that("You can call function and it returns arm", { + expect_subset( + randomize_minimisation_pocock(arms = arms, current_state = covar_df), + choices = arms + ) +}) + +test_that("Assertions work", { + expect_error( + randomize_minimisation_pocock( + arms = c(1, 2), current_state = covar_df + ), + regexp = "Must be of type 'character'" + ) + expect_error( + randomize_minimisation_pocock( + arms = arms, current_state = covar_df, + method = "nonexistent" + ), + regexp = "Must be element of set .'range','var','sd'., but is 'nonexistent'" + ) + expect_error( + randomize_minimisation_pocock( + arms = arms, + current_state = "5 patietns OK" + ), + regexp = + "Assertion on 'current_state' failed: Must be a tibble, not character" + ) + expect_error( + randomize_minimisation_pocock( + arms = arms, + current_state = covar_df[, 1:2] + ), + regexp = "Names must include the elements .'arm'." + ) + # Last subject already randomized + expect_error( + randomize_minimisation_pocock(arms = arms, current_state = covar_df[1:3, ]), + regexp = "must have at most 0 characters" + ) + expect_error( + randomize_minimisation_pocock( + arms = c("foo", "bar"), + current_state = covar_df + ), + regexp = "Must be a subset of .'foo','bar',''." + ) + expect_error( + randomize_minimisation_pocock( + arms = arms, current_state = covar_df, + weights = c("sex" = -1, "diabetes" = 2) + ), + regexp = "Element 1 is not >= 0" + ) + expect_error( + randomize_minimisation_pocock( + arms = arms, current_state = covar_df, + weights = c("wrong" = 1, "diabetes" = 2) + ), + regexp = "is missing elements .'sex'." 
+ ) + expect_error( + randomize_minimisation_pocock( + arms = arms, current_state = covar_df, + ratio = c( + "control" = 1.5, + "active low" = 2, + "active high" = 1 + ) + ), + regexp = "element 1 is not close to an integer" + ) + expect_error( + randomize_minimisation_pocock( + arms = arms, current_state = covar_df, + ratio = c( + "control" = 1L, + "active high" = 1L + ) + ), + regexp = "Must have length 3, but has length 2" + ) + expect_error( + randomize_minimisation_pocock( + arms = arms, current_state = covar_df, + p = 12 + ), + regexp = "Assertion on 'p' failed: Element 1 is not <= 1" + ) +}) + +test_that("Function randomizes first patient randomly", { + randomized <- + sapply(1:100, function(x) { + randomize_minimisation_pocock( + arms = arms, + current_state = covar_df[nrow(covar_df), ] + ) + }) + test <- prop.test( + x = sum(randomized == "control"), + n = length(randomized), + p = 1 / 3, + conf.level = 0.95, + correct = FALSE + ) + expect_gt(test$p.value, 0.05) +}) + +test_that("Function randomizes second patient deterministically", { + arms <- c("A", "B") + situation <- tibble::tibble( + sex = c("F", "F"), + arm = c("A", "") + ) + randomized <- + randomize_minimisation_pocock( + arms = arms, + current_state = situation, + p = 1 + ) + + expect_equal(randomized, "B") +}) + +test_that("Setting proportion of randomness works", { + arms <- c("A", "B") + situation <- tibble::tibble( + sex = c("F", "F"), + arm = c("A", "") + ) + + randomized <- + sapply(1:100, function(x) { + randomize_minimisation_pocock( + arms = arms, + current_state = situation, + p = 0.60 + ) + }) + # 60% to minimization arm (B) 40% to other arm (in this case A) + + test <- prop.test(table(randomized), p = 0.4, correct = FALSE) + + expect_gt(test$p.value, 0.05) +}) + +test_that("Method 'range' works properly", { + arms <- c("A", "B", "C") + situation <- tibble::tibble( + sex = c("F", "M", "F"), + diabetes_type = c("type2", "type2", "type2"), + arm = c("A", "B", "") + ) + randomized <- + randomize_minimisation_pocock( + arms = arms, + current_state = situation, + p = 1, + method = "range" + ) + + testthat::expect_equal(randomized, "C") +}) diff --git a/tests/testthat/test-randomize-simple.R b/tests/testthat/test-randomize-simple.R new file mode 100644 index 0000000..c8d3819 --- /dev/null +++ b/tests/testthat/test-randomize-simple.R @@ -0,0 +1,76 @@ +test_that("returns a single string", { + expect_vector( + randomize_simple( + c("active", "placebo"), + c("active" = 2L, "placebo" = 1L) + ), + ptype = character(), + size = 1 + ) +}) + +test_that("returns one of the arms", { + arms <- c("active", "placebo") + expect_subset( + randomize_simple(arms), + arms + ) +}) + +test_that("ratio equal to 0 means that this arm is never assigned", { + expect_identical( + randomize_simple(c("yes", "no"), c("yes" = 2L, "no" = 0L)), + "yes" + ) +}) + +test_that("incorrect parameters raise an exception", { + # Incorrect arm type + expect_error(randomize_simple(c(7, 4))) + # Incorrect ratio type + expect_error(randomize_simple(c("roof", "basement"), c("high", "low"))) + # Lengths not matching + expect_error(randomize_simple( + c("Paris", "Barcelona"), + c("Paris" = 1L, "Barcelona" = 2L, "Warsaw" = 1L) + )) + # Missing value + expect_error(randomize_simple(c("yen", NA))) + # Empty arm name + expect_error(randomize_simple(c("llama", ""))) + # Doubled arm name + expect_error(randomize_simple(c("llama", "llama"))) +}) + +test_that("proportions are kept (allocation 1:1)", { + randomizations <- + sapply(1:1000, function(x) 
randomize_simple(c("armA", "armB"))) + x <- prop.test( + x = sum(randomizations == "armA"), + n = length(randomizations), + p = 0.5, + conf.level = 0.95, + correct = FALSE + ) + + # precision 0.01 + expect_gt(x$p.value, 0.01) +}) + +test_that( + "proportions are kept (allocation 2:1), even if ratio is in reverse", + { + function_result <- sapply(1:1000, function(x) { + randomize_simple(c("armA", "armB"), c("armB" = 1L, "armA" = 2L)) + }) + x <- prop.test( + x = sum(function_result == "armA"), + n = length(function_result), + p = 2 / 3, + conf.level = 0.95, + correct = FALSE + ) + # precision 0.01 + expect_gt(x$p.value, 0.01) + } +) diff --git a/unbiased.Rproj b/unbiased.Rproj index 8e3c2eb..8d6c830 100644 --- a/unbiased.Rproj +++ b/unbiased.Rproj @@ -1,7 +1,7 @@ Version: 1.0 -RestoreWorkspace: Default -SaveWorkspace: Default +RestoreWorkspace: No +SaveWorkspace: No AlwaysSaveHistory: Default EnableCodeIndexing: Yes @@ -11,3 +11,14 @@ Encoding: UTF-8 RnwWeave: Sweave LaTeX: pdfLaTeX + +AutoAppendNewline: Yes +StripTrailingWhitespace: Yes +LineEndingConversion: Posix + +BuildType: Package +PackageUseDevtools: Yes +PackageInstallArgs: --no-multiarch --with-keep.source +PackageRoxygenize: rd,collate,namespace + +QuitChildProcessesOnExit: Yes diff --git a/vignettes/articles/1000_sim_data.Rds b/vignettes/articles/1000_sim_data.Rds new file mode 100644 index 0000000..2dcc0a5 Binary files /dev/null and b/vignettes/articles/1000_sim_data.Rds differ diff --git a/vignettes/articles/helpers/functions.R b/vignettes/articles/helpers/functions.R new file mode 100644 index 0000000..2fde29a --- /dev/null +++ b/vignettes/articles/helpers/functions.R @@ -0,0 +1,116 @@ +# functions + +simulate_data_monte_carlo <- + function(def, n) { + data <- + genData(n, def) |> + mutate( + sex = as.character(sex), + age = as.character(age), + diabetes_type = as.character(diabetes_type), + hba1c = as.character(hba1c), + tpo2 = as.character(tpo2), + wound_size = as.character(wound_size) + ) |> + tibble::as_tibble() |> + tibble::add_column(arm = "") + + return(data) + } + +minimize_results <- + function(current_data, arms, weights) { + for (n in seq_len(nrow(current_data))) { + current_state <- current_data[1:n, 2:ncol(current_data)] + + current_data$arm[n] <- + randomize_minimisation_pocock( + arms = arms, + current_state = current_state, + weights = weights + ) + } + + return(current_data$arm) + } + +simple_results <- + function(current_data, arms, ratio) { + for (n in seq_len(nrow(current_data))) { + current_data$arm[n] <- + randomize_simple(arms, ratio) + } + + return(current_data$arm) + } + +# Function to generate a randomisation list +block_rand <- + function(n, block, n_groups, strata, arms = LETTERS[1:n_groups]) { + strata_grid <- expand.grid(strata) + + strata_n <- nrow(strata_grid) + + ratio <- rep(1, n_groups) + + gen_seq_list <- lapply(seq_len(strata_n), function(i) { + rand <- rpbrPar( + N = n, + rb = block, + K = n_groups, + ratio = ratio, + groups = arms, + filledBlock = FALSE + ) + getRandList(gen_seq_list(rand))[1, ] + }) + df_list <- tibble::tibble() + for (i in seq_len(strata_n)) { + local_df <- strata_grid |> + dplyr::slice(i) |> + dplyr::mutate(count = N) |> + tidyr::uncount(count) |> + tibble::add_column(rand_arm = genSeq_list[[i]]) + df_list <- rbind(local_df, df_list) + } + return(df_list) + } + +# Generate a research arm for patients in each iteration +block_results <- function(current_data) { + simulation_result <- + block_rand( + n = n, + block = c(3, 6, 9), + n_groups = 3, + strata = list( + sex = 
c("0", "1"), + diabetes_type = c("0", "1"), + hba1c = c("0", "1"), + tpo2 = c("0", "1"), + age = c("0", "1"), + wound_size = c("0", "1") + ), + arms = c("armA", "armB", "armC") + ) + + for (n in seq_len(nrow(current_data))) { + # "-1" is for "arm" column + current_state <- current_data[n, 2:(ncol(current_data) - 1)] + + matching_rows <- which(apply( + simulation_result[, -ncol(simulation_result)], 1, + function(row) all(row == current_state) + )) + + if (length(matching_rows) > 0) { + current_data$arm[n] <- + simulation_result[matching_rows[1], "rand_arm"] + + # Delete row from randomization list + simulation_result <- simulation_result[-matching_rows[1], , drop = FALSE] + } + } + + return(current_data$arm) +} diff --git a/vignettes/articles/helpers/run_parallel.R b/vignettes/articles/helpers/run_parallel.R new file mode 100644 index 0000000..1d13604 --- /dev/null +++ b/vignettes/articles/helpers/run_parallel.R @@ -0,0 +1,77 @@ +source("helpers/functions.R") + +# set cluster +library(parallel) +# Start parallel cluster +cl <- makeForkCluster(no_of_cores) + +results <- + parLapply(cl, 1:no_of_iterations, function(i) { + # lapply(1:no_of_iterations, funĆction(i) { + set.seed(i) + + data <- simulate_data_monte_carlo(def, n) + + # eqal weights - 1/6 + minimize_equal_weights <- + minimize_results( + current_data = data, + arms = c("armA", "armB", "armC") + ) + + # double weights where the covariant is of high clinical significance + minimize_unequal_weights <- + minimize_results( + current_data = data, + arms = c("armA", "armB", "armC"), + weights = c( + "sex" = 1, + "diabetes_type" = 1, + "hba1c" = 2, + "tpo2" = 2, + "age" = 1, + "wound_size" = 2 + ) + ) + + # triple weights where the covariant is of high clinical significance + minimize_unequal_weights_3 <- + minimize_results( + current_data = data, + arms = c("armA", "armB", "armC"), + weights = c( + "sex" = 1, + "diabetes_type" = 1, + "hba1c" = 3, + "tpo2" = 3, + "age" = 1, + "wound_size" = 3 + ) + ) + + simple_data <- + simple_results( + current_data = data, + arms = c("armA", "armB", "armC"), + ratio = c("armB" = 1L, "armA" = 1L, "armC" = 1L) + ) + + block_data <- + block_results(current_data = data) + + data <- + data %>% + select(-arm) %>% + mutate( + minimize_equal_weights_arms = minimize_equal_weights, + minimize_unequal_weights_arms = minimize_unequal_weights, + minimize_unequal_weights_triple_arms = minimize_unequal_weights_3, + simple_data_arms = simple_data, + block_data_arms = block_data + ) %>% + tibble::add_column(simnr = i, .before = 1) + + return(data) + }) + +stopCluster(cl) diff --git a/vignettes/articles/minimization_randomization_comparison.Rmd b/vignettes/articles/minimization_randomization_comparison.Rmd new file mode 100644 index 0000000..9f09e44 --- /dev/null +++ b/vignettes/articles/minimization_randomization_comparison.Rmd @@ -0,0 +1,643 @@ +--- +title: "Comparison of Minimization Randomization with Other Randomization Methods. Assessing the balance of covariates." +author: + - Aleksandra Duda, Jagoda GƂowacka-Walas^[Tranistion Technologies Science] +date: "`r Sys.Date()`" +output: + html_document: + toc: yes +bibliography: references.bib +link-citations: true +--- + +```{r, include = FALSE} +knitr::opts_chunk$set( + collapse = TRUE +) +``` + +## Introduction + +Randomization in clinical trials is the gold standard and is widely considered the best design for evaluating the effectiveness of new treatments compared to alternative treatments (standard of care) or placebo. 
Indeed, selecting an appropriate randomisation method is as important as selecting an appropriate statistical analysis for the study and the analysis strategy, whether based on randomisation or on a population model (@berger2021roadmap).
+
+One of the primary advantages of randomization, particularly simple randomization (typically implemented as a coin flip), is its ability to balance confounding variables across treatment groups. This is especially effective in large sample sizes (n > 200), where the random allocation of participants helps to ensure that both known and unknown confounders are evenly distributed between the study arms. This balanced distribution contributes significantly to the internal validity of the study, as it minimizes the risk of selection bias and of confounding influencing the results (@lim2019randomization).
+
+It is important to note, however, that while simple randomization is powerful in large trials, it may not guarantee an even distribution of confounding factors in trials with smaller sample sizes (n < 100). In such cases, the random allocation might result in imbalances in baseline characteristics between groups, which can affect the interpretation of the treatment's effectiveness. This potential limitation sets the stage for considering additional methods, such as stratified randomization or dynamic minimization algorithms, to address these challenges in smaller trials (@kang2008issues).
+
+This document summarises a comparison of three randomization methods: simple randomization, block randomization, and adaptive randomization. Simple randomization and adaptive randomization (minimization method) are available in the `unbiased` package as the `randomize_simple` and `randomize_minimisation_pocock` functions (@unbiased). The comparison aims to demonstrate that adaptive randomization (minimization) achieves the smallest imbalance of accompanying covariates between therapeutic groups. Monte Carlo simulations were used to generate the data, utilizing the `simstudy` package (@goldfeld2020simstudy). Parameters for the binary distribution of the variables were based on data from the publication by @mrozikiewicz2023allogenic and on information from the researchers.
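+
+As a quick illustration of the two `unbiased` interfaces compared in this article, the sketch below draws a single allocation with each function. The arm labels and the toy `current_state` tibble are invented for this example (the last row is the incoming patient, whose arm is still empty); the full simulation set-up is described in the following sections.
+
+```r
+library(unbiased)
+library(tibble)
+
+# Simple randomization: one allocation from three arms in a 1:1:1 ratio
+randomize_simple(c("armA", "armB", "armC"), c("armA" = 1L, "armB" = 1L, "armC" = 1L))
+
+# Minimisation (Pocock-Simon): previously assigned patients plus the incoming
+# patient, whose arm will be chosen by the algorithm
+current_state <- tibble(
+  sex = c("1", "0", "1"),
+  hba1c = c("1", "1", "0"),
+  arm = c("armA", "armB", "")
+)
+
+randomize_minimisation_pocock(
+  arms = c("armA", "armB", "armC"),
+  current_state = current_state
+)
+```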
+
+The document is structured as follows: first, based on the defined parameters, data are simulated with the Monte Carlo method for a single run; then the generated patients are assigned to groups using the three randomization methods, and these data are summarized with descriptive statistics and the relevant statistical tests; next, data generated for 1000 simulations and stored in .Rds format are loaded; finally, the results based on the standardised mean difference (SMD) are discussed in visual form (boxplot, violin plot) and as the percentage of success achieved by each method at the given precision (tabular summary).
+
+```{r setup, warning = FALSE, message=FALSE}
+# load packages
+library(unbiased)
+library(dplyr)
+library(simstudy)
+library(tableone)
+library(ggplot2)
+library(gt)
+library(gtsummary)
+library(truncnorm)
+library(tidyr)
+library(randomizeR)
+```
+
+## The randomization methods considered for comparison
+
+In the process of comparing the balance of covariates among randomization methods, three randomization methods have been selected for evaluation:
+
+- **simple randomization** - a simple coin toss: an algorithm that gives participants equal chances of being assigned to a particular arm. Its advantage lies in its simplicity and the elimination of predictability. However, due to its complete randomness, it may lead to imbalances in sample size between arms and in prognostic factors. For a large sample size (n > 200), simple randomisation yields a similar number of participants in each group; for a small sample size (n < 100), it results in imbalance (@kang2008issues).
+
+- **block randomization** - a randomization method that takes into account defined covariates for patients. The method involves assigning patients to therapeutic arms in blocks of a fixed size, with the recommendation that the blocks have different sizes. This, to some extent, reduces the risk of researchers predicting future arm assignments. In contrast to simple randomization, the block method aims to balance the number of patients within each block, hence reducing the overall imbalance between arms (@rosenberger2015randomization).
+
+- **adaptive randomization using the minimization method**, based on the algorithm of @pocock1975sequential - this randomization approach aims to balance prognostic factors across treatment arms within a clinical study. It works by evaluating the total imbalance of these factors each time a new patient is considered for the study. The minimization method computes the overall imbalance for each potential arm assignment of the new patient, using a criterion such as the variance of the counts or another specified measure. The patient is then assigned to the arm where their addition results in the smallest total imbalance. This assignment is not deterministic but is made with a predetermined probability, ensuring some level of randomness in arm allocation. The method is particularly useful in trials with multiple prognostic factors or in smaller studies where traditional randomization might fail to achieve balance.
+
+## Assessment of covariate balance
+
+In the proposed approach to the assessment of randomization methods, the primary objective is to evaluate each method in terms of achieving balance in the specified covariates. The assessment of balance aims to determine whether the distributions of covariates are similarly balanced in each therapeutic group.
Based on the literature, standardized mean differences (SMD) have been employed for assessing balance (@berger2021roadmap).
+
+The SMD is one of the most commonly used statistics for assessing the balance of covariates, regardless of the unit of measurement. It is a statistical measure for comparing differences between two groups. The covariates in the examined case are expressed as binary variables. For categorical variables, the SMD is calculated using the following formula (@zhang2019balance):
+
+\[ SMD = \frac{p_1 - p_2}{\sqrt{\frac{p_1 \cdot (1 - p_1) + p_2 \cdot (1 - p_2)}{2}}}, \]
+
+where:
+
+- \( p_1 \) is the proportion in the first arm,
+
+- \( p_2 \) is the proportion in the second arm.
+
+## Defined number of patients and number of iterations
+
+In this simulation, we are using a real use case - the planned FootCell study, a non-commercial clinical trial in the area of civilisation diseases - to guide our data generation process. For the FootCell study, it is anticipated that a total of 105 patients will be randomized into the trial. These patients will be equally divided among three research groups - Group A, Group B, and Group C - with each group comprising 35 patients.
+
+```{r, define-parameters}
+# defined number of patients
+n <- 105
+```
+
+## Defining parameters for Monte-Carlo simulation
+
+The distribution of parameters for the individual covariates, which will subsequently be used to validate the randomization methods, has been defined using the publication by @mrozikiewicz2023allogenic on allogenic interventions.
+
+The publication compares the effectiveness of therapy using ADSC (Adipose-Derived Stem Cells) gel versus standard therapy with fibrin gel in patients treated for diabetic foot ulcers. The FootCell study likewise aims to assess the safety of advanced therapy involving live ASCs (Adipose-Derived Stem Cells) in the treatment of diabetic foot syndrome, considering two groups treated with ADSCs (one or two administrations) compared to fibrin gel. Therefore, appropriate population data have been extracted from the publication to determine distributions that can be maintained when designing the FootCell study.
+
+In the process of defining the study for randomization, the following covariates have been selected:
+
+- **gender** [male/female],
+
+- **diabetes type** [type I/type II],
+
+- **HbA1c** [up to 9/9 to 11] [%],
+
+- **tpo2** [up to 50/above 50] [mmHg],
+
+- **age** [up to 55/above 55] [years],
+
+- **wound size** [up to 2/above 2] [cm\(^2\)].
+
+In the publication by @mrozikiewicz2023allogenic, the variables gender and diabetes type were expressed in the form of frequencies. The remaining variables were presented as measures of central tendency with an indication of variability, as well as minimum and maximum values. To determine the parameters for the binary distribution, the truncated normal distribution available in the `truncnorm` package was utilized. The truncated normal distribution is often used in statistics and probability modeling when dealing with data that are constrained to a certain range. It is particularly useful when you want to model a random variable that cannot take values beyond certain limits (@burkardt2014truncated).
+
+To generate the necessary information for the remaining covariates, a function `simulate_proportions_trunc` was written, utilizing the `rtruncnorm` function (@truncnorm).
The parameters `mean`, `sd`, `lower`, and `upper` were taken from the publication and from expert knowledge of the plausible ranges of these parameters.
+
+The results are presented in a table, assuming that the outcome refers to the first category of each parameter.
+
+```{r, simulate-proportions-function}
+# simulate parameters using truncated normal distribution
+simulate_proportions_trunc <-
+  function(n, lower, upper, mean, sd, threshold) {
+    simulate_data <-
+      rtruncnorm(
+        n = n,
+        a = lower,
+        b = upper,
+        mean = mean,
+        sd = sd
+      ) <= threshold
+
+    sum(simulate_data == TRUE) / n
+  }
+```
+
+```{r, parameters-result-table, tab.cap = "Summary of literature-based verification of the selected strata parameters (Mrozikiewicz-Rakowska et al., 2023)"}
+set.seed(123)
+
+data.frame(
+  hba1c = simulate_proportions_trunc(1000, 0, 11, 7.41, 1.33, 9),
+  tpo2 = simulate_proportions_trunc(1000, 30, 100, 53.4, 18.4, 50),
+  age = simulate_proportions_trunc(1000, 0, 100, 59.2, 9.7, 55),
+  wound_size = simulate_proportions_trunc(1000, 0, 20, 2.7, 2.28, 2)
+) |>
+  rename("wound size" = wound_size) |>
+  pivot_longer(
+    cols = everything(),
+    names_to = "parameter",
+    values_to = "proportions"
+  ) |>
+  mutate("first category of strata" = c("<=9", "<=50", "<=55", "<=2")) |>
+  gt()
+```
+
+## Generate data using Monte-Carlo simulations
+
+Monte-Carlo simulations were used to accumulate the data. This method is designed to model variables based on defined parameters. Variables were defined using the `defData` function from the `simstudy` package (@goldfeld2020simstudy). As all variables specify proportions, `dist = 'binary'` was used to define them. Due to the likely association between the type of diabetes and age - meaning that the older the patient, the higher the probability of type II diabetes - a relationship with diabetes was established when defining the `age` variable through a logit link (`link = "logit"`). The proportions for gender and diabetes type were defined by the researchers and are consistent with the literature (@mrozikiewicz2023allogenic).
+
+Using the `genData` function from the `simstudy` package, a data frame (**data**) was generated with an additional, initially empty variable `arm`, which will be filled in by the subsequent randomization methods during arm allocation for all `n` patients.
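+
+To make the logit link for `age` concrete: with the definition used in the next chunk, the linear predictor equals -0.95 for patients with type II diabetes (`diabetes_type == 0`) and 0 for type I patients, so the implied probabilities of the first age category ("up to 55") can be checked directly. This is a small sanity check added for illustration, not part of the original analysis.
+
+```r
+# Implied P(age <= 55) under the logit definition used below
+plogis(-0.95) # ~0.28 for patients with type II diabetes (diabetes_type == 0)
+plogis(0)     # 0.50 for patients with type I diabetes (diabetes_type == 1)
+```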
+ +```{r, defdata} +# defining variables + +# male - 0.9 +def <- simstudy::defData(varname = "sex", formula = "0.9", dist = "binary") +# type I - 0.15 +def <- simstudy::defData(def, varname = "diabetes_type", formula = "0.15", dist = "binary") +# <= 9 - 0.888 +def <- simstudy::defData(def, varname = "hba1c", formula = "0.888", dist = "binary") +# <= 50 - 0.354 +def <- simstudy::defData(def, varname = "tpo2", formula = "0.354", dist = "binary") +# correlation with diabetes type +def <- simstudy::defData( + def, + varname = "age", formula = "(diabetes_type == 0) * (-0.95)", link = "logit", dist = "binary" +) +# <= 2 - 0.302 +def <- simstudy::defData(def, varname = "wound_size", formula = "0.302", dist = "binary") +``` + +```{r, create-data} +# generate data using genData() +data <- + genData(n, def) |> + mutate( + sex = as.character(sex), + age = as.character(age), + diabetes_type = as.character(diabetes_type), + hba1c = as.character(hba1c), + tpo2 = as.character(tpo2), + wound_size = as.character(wound_size) + ) |> + as_tibble() +``` + +```{r, data-generate} +# add arm to tibble +data <- + data |> + tibble::add_column(arm = "") +``` + +```{r, data-show} +# first 5 rows of the data +head(data, 5) |> + gt() +``` + +## Minimization randomization + +To generate appropriate research arms, a function called `minimize_results` was written, utilizing the `randomize_minimisation_pocock` function available within the `unbiased` package (@unbiased). The probability parameter was set at the level defined within the function (p = 0.85). In the case of minimization randomization, to verify which type of minimization (with equal weights or unequal weights) was used, three calls to the minimize_results function were prepared: + +- **minimize_equal_weights** - each covariate weight takes a value equal to 1 divided by the number of covariates. In this case, the weight is 1/6, + +- **minimize_unequal_weights** - following the expert assessment by physicians, parameters with potentially significant impact on treatment outcomes (hba1c, tpo2, wound size) have been assigned a weight of 2. The remaining covariates have been assigned a weight of 1. + +- **minimize_unequal_weights_3** - following the expert assessment by physicians, parameters with potentially significant impact on treatment outcomes (hba1c, tpo2, wound size) have been assigned a weight of 3. The remaining covariates have been assigned a weight of 1. + +The tables present information about allocations for the first 5 patients. 
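+
+For intuition about what `randomize_minimisation_pocock` optimises, the short sketch below hand-computes a weighted Pocock-Simon imbalance for one incoming patient, using the range (max minus min) of the within-level arm counts as the imbalance criterion. The counts and weights are invented, and this simplified illustration is written for this article only - it is not the package's internal code, whose default criterion may differ.
+
+```r
+# Hypothetical counts of already-randomized patients who share the incoming
+# patient's covariate levels, by covariate (rows) and arm (columns)
+counts <- rbind(
+  sex   = c(armA = 5, armB = 4, armC = 6),
+  hba1c = c(armA = 3, armB = 3, armC = 1)
+)
+weights <- c(sex = 1, hba1c = 2)
+
+# Total weighted imbalance if the new patient were added to each arm in turn
+imbalance_if_assigned <- sapply(colnames(counts), function(k) {
+  new_counts <- counts
+  new_counts[, k] <- new_counts[, k] + 1
+  sum(weights * apply(new_counts, 1, function(x) diff(range(x))))
+})
+
+imbalance_if_assigned
+# armA = 8, armB = 7, armC = 5: armC minimises the imbalance here, so it would
+# be selected with high probability (p = 0.85 in this article), with the
+# remaining probability spread over the other arms
+```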
+
+```{r, minimize-results}
+# drawing an arm for each patient
+minimize_results <-
+  function(current_data, arms, weights) {
+    for (n in seq_len(nrow(current_data))) {
+      current_state <- current_data[1:n, 2:ncol(current_data)]
+
+      current_data$arm[n] <-
+        randomize_minimisation_pocock(
+          arms = arms,
+          current_state = current_state,
+          weights = weights
+        )
+    }
+
+    return(current_data)
+  }
+```
+
+```{r, minimize-equal}
+set.seed(123)
+# equal weights - 1/6
+minimize_equal_weights <-
+  minimize_results(
+    current_data = data,
+    arms = c("armA", "armB", "armC")
+  )
+
+head(minimize_equal_weights, 5) |>
+  gt()
+```
+
+```{r, minimize-unequal-1}
+set.seed(123)
+# double weights where the covariate is of high clinical significance
+minimize_unequal_weights <-
+  minimize_results(
+    current_data = data,
+    arms = c("armA", "armB", "armC"),
+    weights = c(
+      "sex" = 1,
+      "diabetes_type" = 1,
+      "hba1c" = 2,
+      "tpo2" = 2,
+      "age" = 1,
+      "wound_size" = 2
+    )
+  )
+
+head(minimize_unequal_weights, 5) |>
+  gt()
+```
+
+```{r, minimize-unequal-2}
+set.seed(123)
+# triple weights where the covariate is of high clinical significance
+minimize_unequal_weights_3 <-
+  minimize_results(
+    current_data = data,
+    arms = c("armA", "armB", "armC"),
+    weights = c(
+      "sex" = 1,
+      "diabetes_type" = 1,
+      "hba1c" = 3,
+      "tpo2" = 3,
+      "age" = 1,
+      "wound_size" = 3
+    )
+  )
+
+head(minimize_unequal_weights_3, 5) |>
+  gt()
+```
+
+The `statistics_table` function was developed to report the distribution of the number of patients and of the covariates across the research arms, together with p-values from the statistical tests used to compare proportions - the chi-squared test and, for small samples, Fisher's exact test.
+
+The function relies on the `tbl_summary` function available in the `gtsummary` package (@gtsummary).
+
+```{r, statistics-table}
+# generation of frequency and chi^2 statistic values or fisher exact test
+statistics_table <-
+  function(data) {
+    data |>
+      mutate(
+        sex = ifelse(sex == "1", "men", "women"),
+        diabetes_type = ifelse(diabetes_type == "1", "type1", "type2"),
+        hba1c = ifelse(hba1c == "1", "<=9", "(9,11>"),
+        tpo2 = ifelse(tpo2 == "1", "<=50", ">50"),
+        age = ifelse(age == "1", "<=55", ">55"),
+        wound_size = ifelse(wound_size == "1", "<=2", ">2")
+      ) |>
+      tbl_summary(
+        include = c(sex, diabetes_type, hba1c, tpo2, age, wound_size),
+        by = arm
+      ) |>
+      modify_header(label = "") |>
+      modify_header(all_stat_cols() ~ "**{level}**, N = {n}") |>
+      bold_labels() |>
+      add_p()
+  }
+```
+
+The tables below present a statistical summary of the results of the first iteration for:
+
+- **Minimization with all weights equal to 1/6**.
+
+```{r, chi2-1, tab.cap = "Summary of proportion test for minimization randomization with equal weights"}
+statistics_table(minimize_equal_weights)
+```
+
+- **Minimization with weights 2:1**.
+
+```{r, chi2-2, tab.cap = "Summary of proportion test for minimization randomization with weights 2:1"}
+statistics_table(minimize_unequal_weights)
+```
+
+- **Minimization with weights 3:1**.
+
+```{r, chi2-3, tab.cap = "Summary of proportion test for minimization randomization with weights 3:1"}
+statistics_table(minimize_unequal_weights_3)
+```
+
+## Simple randomization
+
+In the next step, arms were generated for the patients using simple randomization, available in the `unbiased` package as the `randomize_simple` function (@unbiased).
The `simple_results` function was then called to produce `simple_data`, under the initial assumption of assigning patients to the three arms in a 1:1:1 ratio.
+
+Since this is simple randomization, it does not take the baseline covariates into account, and treatment assignment occurs completely at random (coin-flip method). The tables illustrate an example of the data output and summary statistics, including a summary of the statistical tests.
+
+```{r, simple-result}
+# simple randomization
+simple_results <-
+  function(current_data, arms, ratio) {
+    for (n in seq_len(nrow(current_data))) {
+      current_data$arm[n] <-
+        randomize_simple(arms, ratio)
+    }
+
+    return(current_data)
+  }
+```
+
+```{r, simple-data}
+set.seed(123)
+
+simple_data <-
+  simple_results(
+    current_data = data,
+    arms = c("armA", "armB", "armC"),
+    ratio = c("armB" = 1L, "armA" = 1L, "armC" = 1L)
+  )
+
+head(simple_data, 5) |>
+  gt()
+```
+
+```{r, chi2-4, tab.cap = "Summary of proportion test for simple randomization"}
+statistics_table(simple_data)
+```
+
+## Block randomization
+
+Block randomization, unlike the minimization and simple randomization methods, was implemented on the basis of the `rpbrPar` function available in the `randomizeR` package (@randomizeR). Using it, the `block_rand` function was created, which, based on the defined number of patients, the arms, and a list of stratifying factors, generates a randomization list whose length equals the number of patients multiplied by the product of the numbers of categories of the covariates. For the data specified in this document, a single iteration amounts to **105 * 2^6 = 6720 rows**. This ensures that enough randomisation codes are available for every combination of strata: even if all patients happened to share the same characteristics, there would still be enough codes for the defined `n` patients.
+
+Based on the `block_rand` function, a randomisation list can be generated against which patients with the characteristics in the output `data` frame are allocated. Because there are 3 arms and the allocation of consecutive patients needs to be blinded, block sizes of 3, 6 and 9 were used for the calculations. A simplified sketch of how permuted blocks work within a single stratum is shown below, before the full implementation.
+
+In the next step, patients were assigned to research groups using the `block_results` function (based on the list generated by `block_rand`). The first available code on the randomization list that meets the specific conditions is selected and then removed from the list of available codes. In this way, research arms are generated that ensure the appropriate number of patients in each group (based on the assumed 1:1:1 ratio).
+
+The tables show the assignment of patients to groups using block randomisation, together with summary statistics including a summary of the statistical tests.
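+
+The following sketch illustrates the core idea of permuted blocks for a single stratum: block sizes are drawn from the allowed set, and the three arms are shuffled within each block in a 1:1:1 ratio. It is a simplified stand-in written for this article; the vignette itself relies on `randomizeR::rpbrPar`, as shown in the next chunk.
+
+```r
+set.seed(42)
+
+# Permuted blocks for ONE stratum: arms balanced 1:1:1 within every block
+arms <- c("armA", "armB", "armC")
+block_sizes <- c(3, 6, 9)
+
+blocks <- list()
+while (sum(lengths(blocks)) < 105) {
+  size <- sample(block_sizes, 1)
+  # each arm appears size / 3 times, in random order, within the block
+  blocks[[length(blocks) + 1]] <- sample(rep(arms, size / length(arms)))
+}
+
+rand_list <- unlist(blocks)[1:105]
+table(rand_list) # close to 35/35/35 by construction
+```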
+
+```{r, block-rand}
+# Function to generate a randomisation list
+block_rand <-
+  function(n, block, n_groups, strata, arms = LETTERS[1:n_groups]) {
+    strata_grid <- expand.grid(strata)
+
+    strata_n <- nrow(strata_grid)
+
+    ratio <- rep(1, n_groups)
+
+    gen_seq_list <- lapply(seq_len(strata_n), function(i) {
+      rand <- rpbrPar(
+        N = n,
+        rb = block,
+        K = n_groups,
+        ratio = ratio,
+        groups = arms,
+        filledBlock = FALSE
+      )
+      getRandList(genSeq(rand))[1, ]
+    })
+
+    df_list <- tibble::tibble()
+    for (i in seq_len(strata_n)) {
+      local_df <- strata_grid |>
+        dplyr::slice(i) |>
+        dplyr::mutate(count_n = n) |>
+        tidyr::uncount(count_n) |>
+        tibble::add_column(rand_arm = gen_seq_list[[i]])
+      df_list <- rbind(local_df, df_list)
+    }
+    return(df_list)
+  }
+```
+
+```{r, block-results}
+# Generate a research arm for patients in each iteration
+block_results <- function(current_data) {
+  simulation_result <-
+    block_rand(
+      n = n,
+      block = c(3, 6, 9),
+      n_groups = 3,
+      strata = list(
+        sex = c("0", "1"),
+        diabetes_type = c("0", "1"),
+        hba1c = c("0", "1"),
+        tpo2 = c("0", "1"),
+        age = c("0", "1"),
+        wound_size = c("0", "1")
+      ),
+      arms = c("armA", "armB", "armC")
+    )
+
+  for (n in seq_len(nrow(current_data))) {
+    # "-1" is for "arm" column
+    current_state <- current_data[n, 2:(ncol(current_data) - 1)]
+
+    matching_rows <- which(apply(
+      simulation_result[, -ncol(simulation_result)], 1,
+      function(row) all(row == current_state)
+    ))
+
+    if (length(matching_rows) > 0) {
+      current_data$arm[n] <-
+        simulation_result[matching_rows[1], "rand_arm"]
+
+      # Delete row from randomization list
+      simulation_result <- simulation_result[-matching_rows[1], , drop = FALSE]
+    }
+  }
+
+  return(current_data)
+}
+```
+
+```{r, block-data-show}
+set.seed(123)
+
+block_data <-
+  block_results(data)
+
+head(block_data, 5) |>
+  gt()
+```
+
+```{r, chi2-5, tab.cap = "Summary of proportion test for block randomization"}
+statistics_table(block_data)
+```
+
+## Generate 1000 simulations
+
+We performed 1000 iterations of data generation with the parameters defined above, i.e. 1000 Monte-Carlo repetitions of the 105-patient data set. This allows the effect of the randomization methods on covariate balance to be assessed much more reliably than from a single run.
+
+These data are read into the variable `sim_data` from the file `1000_sim_data.Rds`, which is distributed with the vignette materials in the GitHub repository of the `unbiased` package.
+
+```{r, simulations}
+# define number of iterations
+# no_of_iterations <- 1000 # nolint
+# define number of cores
+# no_of_cores <- 20 # nolint
+# perform simulations (run carefully!)
+# source("~/unbiased/vignettes/helpers/run_parallel.R") # nolint
+
+# read data from file
+sim_data <- readRDS("1000_sim_data.Rds")
+```
+
+## Check balance using the SMD test
+
+In order to select the test and to define the precision threshold above which we consider the covariates imbalanced, a literature analysis was conducted based on publications such as @lee2021estimating, @austin2009balance, @doah2021impact, @brown2020novel, @nguyen2017double, @sanchez2003effect, @lee2022propensity and @berger2021roadmap.
+
+To assess the balance of covariates between the research groups A, B and C, the Standardized Mean Difference (SMD) test was employed, which compares two groups.
Since there are three groups in the example, the SMD test is computed for each pair of comparisons: A vs B, A vs C, and B vs C. The average SMD test for a given covariate is then calculated based on these comparisons. + +In the literature analysis, the precision level ranged between 0.1-0.2. For small samples, it was expected that the SMD test would exceed 0.2 (@austin2009balance). Additionally, according to the publication by @sanchez2003effect, there is no golden standard that dictates a specific threshold for the SMD test to be considered balanced. Generally, the smaller the SMD test, the smaller the difference in covariate imbalance. + +In the analyzed example, due to the sample size of 105 patients, a threshold of 0.2 for the SMD test was adopted. + +A function called `smd_covariants_data` was written to generate frames that produce the SMD test for each covariate in each iteration, utilizing the `CreateTableOne` function available in the `tableone` package (@tableone). In cases where the test result is <0.001, a value of 0 was assigned. + +The results for each randomization method were stored in the `cov_balance_data`. + +```{r, define-strata-vars} +# definied covariants +vars <- c("sex", "age", "diabetes_type", "wound_size", "tpo2", "hba1c") +``` + +```{r, smd-covariants-data} +smd_covariants_data <- + function(data, vars, strata) { + result_table <- + lapply(unique(data$simnr), function(i) { + current_data <- data[data$simnr == i, ] + arms_to_check <- setdiff(names(current_data), c(vars, "id", "simnr")) + # check SMD for any covariants + lapply(arms_to_check, function(arm) { + tab <- + CreateTableOne( + vars = vars, + data = current_data, + strata = arm + ) + + results_smd <- + ExtractSmd(tab) |> + as.data.frame() |> + tibble::rownames_to_column("covariants") |> + select(covariants, results = average) |> + mutate(results = round(as.numeric(results), 3)) + + results <- + bind_cols( + simnr = i, + strata = arm, + results_smd + ) + return(results) + }) |> + bind_rows() + }) |> + bind_rows() + + return(result_table) + } +``` + +```{r, cov-balance-data, echo = TRUE, results='hide'} +cov_balance_data <- + smd_covariants_data( + data = sim_data, + vars = vars + ) |> + mutate(method = case_when( + strata == "minimize_equal_weights_arms" ~ "minimize equal", + strata == "minimize_unequal_weights_arms" ~ "minimize unequal 2:1", + strata == "minimize_unequal_weights_triple_arms" ~ "minimize unequal 3:1", + strata == "simple_data_arms" ~ "simple randomization", + strata == "block_data_arms" ~ "block randomization" + )) |> + select(-strata) +``` + +Below are the results of the SMD test presented in the form of boxplot and violin plot, depicting the outcomes for each randomization method. The red dashed line indicates the adopted precision threshold. 
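+
+Before turning to the plots, the snippet below hand-computes the quantity being plotted for one binary covariate: the SMD formula introduced earlier is applied to each pair of arms (A vs B, A vs C, B vs C), taking the absolute value since only the magnitude of the difference matters for balance, and the three values are averaged, mirroring the `average` column extracted above. The proportions are invented for illustration only.
+
+```r
+# Hand-computed average SMD for one binary covariate across three arms
+p <- c(armA = 0.40, armB = 0.50, armC = 0.35) # illustrative proportions
+
+smd_pair <- function(p1, p2) {
+  abs(p1 - p2) / sqrt((p1 * (1 - p1) + p2 * (1 - p2)) / 2)
+}
+
+arm_pairs <- combn(names(p), 2)
+pairwise_smd <- apply(arm_pairs, 2, function(k) smd_pair(p[k[1]], p[k[2]]))
+mean(pairwise_smd) # average SMD for this covariate, compared against 0.2
+```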
+
+- **Boxplot of the combined results**
+
+```{r, boxplot, fig.cap = "Summary of the average SMD for each randomization method", warning=FALSE, fig.width=9, fig.height=6}
+# boxplot
+cov_balance_data |>
+  select(simnr, results, method) |>
+  group_by(simnr, method) |>
+  mutate(results = mean(results)) |>
+  distinct() |>
+  ggplot(aes(x = method, y = results, fill = method)) +
+  geom_boxplot() +
+  geom_hline(yintercept = 0.2, linetype = "dashed", color = "red") +
+  theme_bw()
+```
+
+- **Violin plot**
+
+```{r, violinplot, fig.cap = "SMD for each randomization method, by covariate", warning = FALSE, fig.width=9, fig.height=6}
+# violin plot
+cov_balance_data |>
+  ggplot(aes(x = method, y = results, fill = method)) +
+  geom_violin() +
+  geom_hline(
+    yintercept = 0.2,
+    linetype = "dashed",
+    color = "red"
+  ) +
+  facet_wrap(~covariants, ncol = 3) +
+  theme_bw() +
+  theme(axis.text = element_text(angle = 45, vjust = 0.5, hjust = 1))
+```
+
+- **Summary table of success**
+
+Based on the specified precision threshold of 0.2, a function defining randomization success, named `success_power`, was developed. If the SMD value for any covariate in a given iteration exceeds 0.2, the function classifies that iteration as a 'failure' (0); otherwise it is classified as a 'success' (1).
+
+The final success power is calculated as the number of successful iterations divided by the total number of iterations.
+
+The results are summarized in a table as the percentage of success for each randomization method.
+
+```{r, success-power}
+# function defining success of randomisation
+success_power <-
+  function(cov_data) {
+    result_table <-
+      lapply(unique(cov_data$simnr), function(i) {
+        current_data <- cov_data[cov_data$simnr == i, ]
+
+        current_data |>
+          group_by(method) |>
+          summarise(success = ifelse(any(results > 0.2), 0, 1)) |>
+          tibble::add_column(simnr = i, .before = 1)
+      }) |>
+      bind_rows()
+
+    success <-
+      result_table |>
+      group_by(method) |>
+      summarise(results_power = sum(success) / n() * 100)
+
+    return(success)
+  }
+```
+
+```{r, success-result-data, tab.cap = "Summary of the percentage of success for each randomization method"}
+success_power(cov_balance_data) |>
+  as.data.frame() |>
+  rename(`power results [%]` = results_power) |>
+  gt()
+```
+
+## Conclusion
+
+Considering all three randomization methods - minimization, block randomization, and simple randomization - minimization performs best in terms of covariate balance. Simple randomization has a significant drawback: patient allocation to arms occurs purely at random, with equal probability. This can leave both the number of patients per arm and the covariates imbalanced, and the degree of imbalance is itself random; this is particularly the case with small samples. Balancing the number of patients can only be relied upon for larger samples (n > 200).
+
+Block randomization, on the other hand, performs very well in balancing the number of patients across groups in a specified allocation ratio. However, compared to adaptive randomisation using the minimisation method, block randomisation is less effective at balancing the covariates.
+
+The minimization method provides the highest success power by ensuring balance of the covariates across groups. This is made possible by the algorithm implemented as part of minimisation randomisation.
When assigning the next patient to a group, the method examines the total imbalance and then assigns the patient to the appropriate study group with a specified probability to balance the sample in terms of size, and covariates. + +# References + +--- +nocite: '@*' +... diff --git a/vignettes/articles/references.bib b/vignettes/articles/references.bib new file mode 100644 index 0000000..2a1fe58 --- /dev/null +++ b/vignettes/articles/references.bib @@ -0,0 +1,222 @@ +% Encoding: UTF-8 + +@article{lim2019randomization, + title={Randomization in clinical studies}, + author={Lim, Chi-Yeon and In, Junyong}, + journal={Korean journal of anesthesiology}, + volume={72}, + number={3}, + pages={221--232}, + year={2019}, + publisher={Korean Society of Anesthesiologists} +} + + @article{goldfeld2020simstudy, + title = {simstudy: Illuminating research methods through data generation}, + author = {Keith Goldfeld and Jacob Wujciak-Jens}, + publisher = {The Open Journal}, + journal = {Journal of Open Source Software}, + year = {2020}, + volume = {5}, + number = {54}, + pages = {2763}, + url = {https://doi.org/10.21105/joss.02763}, + doi = {10.21105/joss.02763}, + } + + @article{mrozikiewicz2023allogenic, + title={Allogenic Adipose-Derived Stem Cells in Diabetic Foot Ulcer Treatment: Clinical Effectiveness, Safety, Survival in the Wound Site, and Proteomic Impact}, + author={Mrozikiewicz-Rakowska, Beata and Szab{\l}owska-Gadomska, Ilona and Cysewski, Dominik and Rudzi{\'n}ski, Stefan and P{\l}oski, Rafa{\l} and Gasperowicz, Piotr and Konarzewska, Magdalena and Zieli{\'n}ski, Jakub and Mieczkowski, Mateusz and Sie{\'n}ko, Damian and others}, + journal={International Journal of Molecular Sciences}, + volume={24}, + number={2}, + pages={1472}, + year={2023}, + publisher={MDPI} +} + +@article{pocock1975sequential, + title={Sequential treatment assignment with balancing for prognostic factors in the controlled clinical trial}, + author={Pocock, Stuart J and Simon, Richard}, + journal={Biometrics}, + pages={103--115}, + year={1975}, + publisher={JSTOR} +} + +@book{rosenberger2015randomization, + title={Randomization in clinical trials: theory and practice}, + author={Rosenberger, William F and Lachin, John M}, + year={2015}, + publisher={John Wiley \& Sons} +} + +@article{lee2021estimating, + title={Estimating COVID-19 infection and severity risks in patients with chronic rhinosinusitis: a Korean nationwide cohort study}, + author={Lee, Seung Won and Kim, So Young and Moon, Sung Yong and Yang, Jee Myung and Ha, Eun Kyo and Jee, Hye Mi and Shin, Jae Il and Cho, Seong Ho and Yon, Dong Keon and Suh, Dong In}, + journal={The Journal of Allergy and Clinical Immunology: In Practice}, + volume={9}, + number={6}, + pages={2262--2271}, + year={2021}, + publisher={Elsevier} +} + +@article{austin2009balance, + title={Balance diagnostics for comparing the distribution of baseline covariates between treatment groups in propensity-score matched samples}, + author={Austin, Peter C}, + journal={Statistics in medicine}, + volume={28}, + number={25}, + pages={3083--3107}, + year={2009}, + publisher={Wiley Online Library} +} + +@article{doah2021impact, + title={The impact of primary tumor resection on survival in asymptomatic colorectal cancer patients with unresectable metastases}, + author={Doah, Ki Yoon and Shin, Ui Sup and Jeon, Byong Ho and Cho, Sang Sik and Moon, Sun Mi}, + journal={Annals of Coloproctology}, + volume={37}, + number={2}, + pages={94}, + year={2021}, + publisher={Korean Society of Coloproctology} +} + 
+@article{brown2020novel, + title={A novel approach for propensity score matching and stratification for multiple treatments: Application to an electronic health record--derived study}, + author={Brown, Derek W and DeSantis, Stacia M and Greene, Thomas J and Maroufy, Vahed and Yaseen, Ashraf and Wu, Hulin and Williams, George and Swartz, Michael D}, + journal={Statistics in medicine}, + volume={39}, + number={17}, + pages={2308--2323}, + year={2020}, + publisher={Wiley Online Library} +} + +@article{nguyen2017double, + title={Double-adjustment in propensity score matching analysis: choosing a threshold for considering residual imbalance}, + author={Nguyen, Tri-Long and Collins, Gary S and Spence, Jessica and Daur{\`e}s, Jean-Pierre and Devereaux, PJ and Landais, Paul and Le Manach, Yannick}, + journal={BMC medical research methodology}, + volume={17}, + pages={1--8}, + year={2017}, + publisher={Springer} +} + +@article{sanchez2003effect, + title={Effect-size indices for dichotomized outcomes in meta-analysis.}, + author={S{\'a}nchez-Meca, Julio and Mar{\'\i}n-Mart{\'\i}nez, Fulgencio and Chac{\'o}n-Moscoso, Salvador}, + journal={Psychological methods}, + volume={8}, + number={4}, + pages={448}, + year={2003}, + publisher={American Psychological Association} +} + +@article{lee2022propensity, + title={Propensity score matching for causal inference and reducing the confounding effects: statistical standard and guideline of Life Cycle Committee}, + author={Lee, Seung Won and Acharya, Krishna Prasad and others}, + journal={Life Cycle}, + volume={2}, + year={2022}, + publisher={Life Cycle} +} + +@article{zhang2019balance, + title={Balance diagnostics after propensity score matching}, + author={Zhang, Zhongheng and Kim, Hwa Jung and Lonjon, Guillaume and Zhu, Yibing and others}, + journal={Annals of translational medicine}, + volume={7}, + number={1}, + year={2019}, + publisher={AME Publications} +} + + @Manual{truncnorm, + title = {truncnorm: Truncated Normal Distribution}, + author = {Olaf Mersmann and Heike Trautmann and Detlef Steuer and Björn Bornkamp}, + year = {2023}, + note = {R package version 1.0-9}, + url = {https://github.com/olafmersmann/truncnorm}, + } + +@article{burkardt2014truncated, + title={The truncated normal distribution}, + author={Burkardt, John}, + journal={Department of Scientific Computing Website, Florida State University}, + volume={1}, + pages={35}, + year={2014} +} + + @Manual{tableone, + title = {tableone: Create 'Table 1' to Describe Baseline Characteristics with or +without Propensity Score Weights}, + author = {Kazuki Yoshida and Alexander Bartel}, + year = {2022}, + note = {R package version 0.13.2}, + url = {https://github.com/kaz-yos/tableone}, + } + @article{randomizeR, + title = {{randomizeR}: An {R} Package for the Assessment and Implementation of Randomization in Clinical Trials}, + author = {Diane Uschner and David Schindler and Ralf-Dieter Hilgers and Nicole Heussen}, + journal = {Journal of Statistical Software}, + year = {2018}, + volume = {85}, + number = {8}, + pages = {1--22}, + doi = {10.18637/jss.v085.i08}, + } + + + @article{gtsummary, + author = {Daniel D. Sjoberg and Karissa Whiting and Michael Curry and Jessica A. 
Lavery and Joseph Larmarange},
+    title = {Reproducible Summary Tables with the gtsummary Package},
+    journal = {{The R Journal}},
+    year = {2021},
+    url = {https://doi.org/10.32614/RJ-2021-053},
+    doi = {10.32614/RJ-2021-053},
+    volume = {13},
+    issue = {1},
+    pages = {570-580},
+  }
+
+@article{berger2021roadmap,
+  title={A roadmap to using randomization in clinical trials},
+  author={Berger, Vance W and Bour, Louis Joseph and Carter, Kerstine and Chipman, Jonathan J and Everett, Colin C and Heussen, Nicole and Hewitt, Catherine and Hilgers, Ralf-Dieter and Luo, Yuqun Abigail and Renteria, Jone and others},
+  journal={BMC Medical Research Methodology},
+  volume={21},
+  pages={1--24},
+  year={2021},
+  publisher={Springer}
+}
+
+@article{kang2008issues,
+  title={Issues in outcomes research: an overview of randomization techniques for clinical trials},
+  author={Kang, Minsoo and Ragan, Brian G and Park, Jae-Hyeon},
+  journal={Journal of athletic training},
+  volume={43},
+  number={2},
+  pages={215--221},
+  year={2008},
+  publisher={The National Athletic Trainers' Association}
+}
+
+@Manual{unbiased,
+  title = {unbiased: Diverse Randomization Algorithms for Clinical Trials},
+  author = {Kamil Sijko and Kinga Sałata and Aleksandra Duda and Łukasz Wałejko},
+  year = {2024},
+  note = {R package version 1.0.0},
+  url = {https://ttscience.github.io/unbiased/},
+}
diff --git a/vignettes/boxplot.png b/vignettes/boxplot.png
new file mode 100644
index 0000000..aac9a3e
Binary files /dev/null and b/vignettes/boxplot.png differ