diff --git a/R/adjust_significance_level.R b/R/adjust_significance_level.R
index a9c916f..6e8bec0 100644
--- a/R/adjust_significance_level.R
+++ b/R/adjust_significance_level.R
@@ -29,7 +29,7 @@
 #' # ...
 #'
 #' # Simulation-based adjustment of the significance level using `allocation_rule`
-#' \dontrun{
+#' \donttest{
 #' adjusted_alpha <- adjust_significance_level(
 #'   allocation_rule, models,
 #'   N_total = 150, N_ini = rep(10, 5), N_block = 10,
diff --git a/R/learn_allocation_rule.R b/R/learn_allocation_rule.R
index 4387cf5..624658f 100644
--- a/R/learn_allocation_rule.R
+++ b/R/learn_allocation_rule.R
@@ -65,7 +65,7 @@
 #'
 #' # We obtain an optimal adaptive allocation rule by executing
 #' # `learn_allocation_rule()` with the `models`.
-#' \dontrun{
+#' \donttest{
 #' allocation_rule <- learn_allocation_rule(
 #'   models,
 #'   N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3,
@@ -85,7 +85,7 @@
 #' )
 #'
 #' # Then, we specify the argument `rl_models` in `learn_allocation_rule` function.
-#' \dontrun{
+#' \donttest{
 #' allocation_rule <- learn_allocation_rule(
 #'   models,
 #'   N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3,
diff --git a/R/rl_config_set.R b/R/rl_config_set.R
index 337a577..3276888 100644
--- a/R/rl_config_set.R
+++ b/R/rl_config_set.R
@@ -27,14 +27,14 @@
 #' @return A list of reinforcement learning configuration parameters
 #'
 #' @examples
-#' \dontrun{
+#' \donttest{
 #' allocation_rule <- learn_allocation_rule(
 #'   models,
 #'   N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3,
 #'   outcome_type = "continuous", sd_normal = sqrt(4.5),
 #'   seed = 123,
-#'   # We change `iter` to 200 and `cores` for reinforcement learning to 8
-#'   rl_config = rl_config_set(iter = 200, cores = 8),
+#'   # We change `iter` to 200 and `cores` for reinforcement learning to 2
+#'   rl_config = rl_config_set(iter = 200, cores = 2),
 #'   alpha = 0.025
 #' )
 #' }
diff --git a/R/rl_dnn_config.R b/R/rl_dnn_config.R
index b38d89d..1ee6a2d 100644
--- a/R/rl_dnn_config.R
+++ b/R/rl_dnn_config.R
@@ -14,7 +14,7 @@
 #' @return A list of DNN configuration parameters
 #'
 #' @examples
-#' \dontrun{
+#' \donttest{
 #' allocation_rule <- learn_allocation_rule(
 #'   models,
 #'   N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3,
diff --git a/R/simulate_one_trial.R b/R/simulate_one_trial.R
index de9f645..91354f7 100644
--- a/R/simulate_one_trial.R
+++ b/R/simulate_one_trial.R
@@ -37,7 +37,7 @@
 #' the estimated target dose, and the MAE.
 #'
 #' @examples
-#' \dontrun{
+#' \donttest{
 #' # We computed `allocation_rule` and `adjusted_alpha`
 #' # ...
 #'
diff --git a/man/adjust_significance_level.Rd b/man/adjust_significance_level.Rd
index ac24acf..99f5866 100644
--- a/man/adjust_significance_level.Rd
+++ b/man/adjust_significance_level.Rd
@@ -60,7 +60,7 @@ Adjust Significance Level on a Simulation Basis
 # ...
 
 # Simulation-based adjustment of the significance level using `allocation_rule`
-\dontrun{
+\donttest{
 adjusted_alpha <- adjust_significance_level(
   allocation_rule, models,
   N_total = 150, N_ini = rep(10, 5), N_block = 10,
diff --git a/man/learn_allocation_rule.Rd b/man/learn_allocation_rule.Rd
index 45c236b..7fb32b0 100644
--- a/man/learn_allocation_rule.Rd
+++ b/man/learn_allocation_rule.Rd
@@ -112,7 +112,7 @@ models <- DoseFinding::Mods(
 
 # We obtain an optimal adaptive allocation rule by executing
 # `learn_allocation_rule()` with the `models`.
-\dontrun{
+\donttest{
 allocation_rule <- learn_allocation_rule(
   models,
   N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3,
@@ -132,7 +132,7 @@ rl_models <- DoseFinding::Mods(
 )
 
 # Then, we specify the argument `rl_models` in `learn_allocation_rule` function.
-\dontrun{
+\donttest{
 allocation_rule <- learn_allocation_rule(
   models,
   N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3,
diff --git a/man/rl_config_set.Rd b/man/rl_config_set.Rd
index 9c01c26..d51e410 100644
--- a/man/rl_config_set.Rd
+++ b/man/rl_config_set.Rd
@@ -57,14 +57,14 @@ Mainly settings for the arguments of the training() function.
 Not compatible with the new API stack introduced in Ray 2.10.0.
 }
 \examples{
-\dontrun{
+\donttest{
 allocation_rule <- learn_allocation_rule(
   models,
   N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3,
   outcome_type = "continuous", sd_normal = sqrt(4.5),
   seed = 123,
-  # We change `iter` to 200 and `cores` for reinforcement learning to 8
-  rl_config = rl_config_set(iter = 200, cores = 8),
+  # We change `iter` to 200 and `cores` for reinforcement learning to 2
+  rl_config = rl_config_set(iter = 200, cores = 2),
   alpha = 0.025
 )
 }
diff --git a/man/rl_dnn_config.Rd b/man/rl_dnn_config.Rd
index 7a781ca..06aade3 100644
--- a/man/rl_dnn_config.Rd
+++ b/man/rl_dnn_config.Rd
@@ -29,7 +29,7 @@ DNN (deep neural network) configuration for reinforcement learning.
 For detail, see Section 3.2.6 of the original paper.
 }
 \examples{
-\dontrun{
+\donttest{
 allocation_rule <- learn_allocation_rule(
   models,
   N_total = 150, N_ini = rep(10, 5), N_block = 10, Delta = 1.3,
diff --git a/man/simulate_one_trial.Rd b/man/simulate_one_trial.Rd
index 7b1af22..4c2dd9a 100644
--- a/man/simulate_one_trial.Rd
+++ b/man/simulate_one_trial.Rd
@@ -74,7 +74,7 @@ the estimated target dose, and the MAE.
 Simulate One Trial Using an Obtained Optimal Adaptive Allocation Rule
 }
 \examples{
-\dontrun{
+\donttest{
 # We computed `allocation_rule` and `adjusted_alpha`
 # ...