Skip to content

Commit

Permalink
for CRAN resubmission
Browse files Browse the repository at this point in the history
  • Loading branch information
MatsuuraKentaro committed Oct 2, 2024
1 parent 73a6079 commit 384fb80
Show file tree
Hide file tree
Showing 11 changed files with 107 additions and 45 deletions.
2 changes: 1 addition & 1 deletion DESCRIPTION
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
Package: RLoptimal
Type: Package
Title: Optimal Adaptive Allocation Using Deep Reinforcement Learning
Version: 1.0.0
Version: 1.0.1
Authors@R: c(
person("Kentaro", "Matsuura", , "[email protected]",
role = c("aut", "cre", "cph"), comment = c(ORCID = "0000-0001-5262-055X")),
Expand Down
23 changes: 18 additions & 5 deletions R/adjust_significance_level.R
Original file line number Diff line number Diff line change
Expand Up @@ -25,18 +25,31 @@
#' @returns A positive numeric value specifying adjusted significance level.
#'
#' @examples
#' # We computed `allocation_rule`
#' # ...
#' library(RLoptimal)
#'
#' doses <- c(0, 2, 4, 6, 8)
#'
#' models <- DoseFinding::Mods(
#' doses = doses, maxEff = 1.65,
#' linear = NULL, emax = 0.79, sigEmax = c(4, 5)
#' )
#'
#' \dontrun{
#' allocation_rule <- learn_allocation_rule(
#' models,
#' N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3,
#' outcome_type = "continuous", sd_normal = sqrt(4.5),
#' seed = 123, rl_config = rl_config_set(iter = 1000),
#' alpha = 0.025
#' )
#'
#' # Simulation-based adjustment of the significance level using `allocation_rule`
#' \donttest{
#' adjusted_alpha <- adjust_significance_level(
#' allocation_rule, models,
#' N_total = 150, N_ini = rep(10, 5), N_block = 10,
#' outcome_type = "continuous", sd_normal = sqrt(4.5),
#' alpha = 0.025, n_sim = 10000, seed = 123
#' )
#' }
#' )}
#'
#' @importFrom stats quantile
#'
Expand Down
10 changes: 4 additions & 6 deletions R/learn_allocation_rule.R
Original file line number Diff line number Diff line change
Expand Up @@ -65,15 +65,14 @@
#'
#' # We obtain an optimal adaptive allocation rule by executing
#' # `learn_allocation_rule()` with the `models`.
#' \donttest{
#' \dontrun{
#' allocation_rule <- learn_allocation_rule(
#' models,
#' N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3,
#' outcome_type = "continuous", sd_normal = sqrt(4.5),
#' seed = 123, rl_config = rl_config_set(iter = 1000),
#' alpha = 0.025
#' )
#' }
#' )}
#'
#' # It is recommended that the models used in reinforcement learning include
#' # possible models in addition to the models used in the MCPMod method.
Expand All @@ -85,15 +84,14 @@
#' )
#'
#' # Then, we specify the argument `rl_models` in `learn_allocation_rule` function.
#' \donttest{
#' \dontrun{
#' allocation_rule <- learn_allocation_rule(
#' models,
#' N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3,
#' outcome_type = "continuous", sd_normal = sqrt(4.5),
#' seed = 123, rl_models = rl_models, rl_config = rl_config_set(iter = 1000),
#' alpha = 0.025
#' )
#' }
#' )}
#'
#' @importFrom glue glue
#'
Expand Down
5 changes: 2 additions & 3 deletions R/rl_config_set.R
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@
#' @return A list of reinforcement learning configuration parameters
#'
#' @examples
#' \donttest{
#' \dontrun{
#' allocation_rule <- learn_allocation_rule(
#' models,
#' N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3,
Expand All @@ -36,8 +36,7 @@
#' # We change `iter` to 200 and `cores` for reinforcement learning to 2
#' rl_config = rl_config_set(iter = 200, cores = 2),
#' alpha = 0.025
#' )
#' }
#' )}
#'
#' @export
rl_config_set <- function(iter = 1000L,
Expand Down
5 changes: 2 additions & 3 deletions R/rl_dnn_config.R
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
#' @return A list of DNN configuration parameters
#'
#' @examples
#' \donttest{
#' \dontrun{
#' allocation_rule <- learn_allocation_rule(
#' models,
#' N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3,
Expand All @@ -27,8 +27,7 @@
#' model = rl_dnn_config(fcnet_hiddens = c(512L, 512L), fcnet_activation = "tanh")
#' ),
#' alpha = 0.025
#' )
#' }
#' )}
#'
#' @export
rl_dnn_config <- function(
Expand Down
32 changes: 27 additions & 5 deletions R/simulate_one_trial.R
Original file line number Diff line number Diff line change
Expand Up @@ -37,9 +37,31 @@
#' the estimated target dose, and the MAE.
#'
#' @examples
#' \donttest{
#' # We computed `allocation_rule` and `adjusted_alpha`
#' # ...
#' library(RLoptimal)
#'
#' doses <- c(0, 2, 4, 6, 8)
#'
#' models <- DoseFinding::Mods(
#' doses = doses, maxEff = 1.65,
#' linear = NULL, emax = 0.79, sigEmax = c(4, 5)
#' )
#'
#' \dontrun{
#' allocation_rule <- learn_allocation_rule(
#' models,
#' N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3,
#' outcome_type = "continuous", sd_normal = sqrt(4.5),
#' seed = 123, rl_config = rl_config_set(iter = 1000),
#' alpha = 0.025
#' )
#'
#' # Simulation-based adjustment of the significance level using `allocation_rule`
#' adjusted_alpha <- adjust_significance_level(
#' allocation_rule, models,
#' N_total = 150, N_ini = rep(10, 5), N_block = 10,
#' outcome_type = "continuous", sd_normal = sqrt(4.5),
#' alpha = 0.025, n_sim = 10000, seed = 123
#' )}
#'
#' eval_models <- DoseFinding::Mods(
#' doses = doses, maxEff = 1.65,
Expand All @@ -51,14 +73,14 @@
#' true_model_name <- "emax"
#'
#' # Simulate one trial using the obtained `allocation_rule` when the true model is "emax"
#' \dontrun{
#' res_one <- simulate_one_trial(
#' allocation_rule, models,
#' true_response = true_response_list[[true_model_name]],
#' N_total = 150, N_ini = rep(10, 5), N_block = 10,
#' Delta = 1.3, outcome_type = "continuous", sd_normal = sqrt(4.5),
#' alpha = adjusted_alpha, seed = simID, eval_type = "all"
#' )
#' }
#' )}
#'
#' @importFrom stats coef binomial glm plogis predict rbinom rnorm vcov
#'
Expand Down
23 changes: 18 additions & 5 deletions man/adjust_significance_level.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

10 changes: 4 additions & 6 deletions man/learn_allocation_rule.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

5 changes: 2 additions & 3 deletions man/rl_config_set.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

5 changes: 2 additions & 3 deletions man/rl_dnn_config.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

32 changes: 27 additions & 5 deletions man/simulate_one_trial.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

0 comments on commit 384fb80

Please sign in to comment.