From 0010ca6a21ae6c5c07182fd7bfd8ab6f1379eac5 Mon Sep 17 00:00:00 2001
From: Ondrej Slama
Date: Thu, 22 Feb 2024 14:02:56 +0000
Subject: [PATCH] update test coverage

---
 R/utils.R                                   |   4 +-
 tests/testthat/_snaps/calibrationProfile.md | 110 ++++++++++++
 tests/testthat/_snaps/getEstimates.md       | 138 +++++++--------
 .../calibrationProfile/p1.png               | Bin
 .../calibrationProfile/p2.png               | Bin
 .../calibrationProfile/p3.png               | Bin
 .../calibrationProfile/p5.png               | Bin
 .../calibrationProfile/p6.png               | Bin
 .../calibrationProfile/p7.png               | Bin
 .../calibrationProfile/udf.png              | Bin
 .../riskProfile/cgam.png                    | Bin
 .../riskProfile/compare_pc.png              | Bin
 .../riskProfile/errorbar.png                | Bin
 .../riskProfile/p1.png                      | Bin
 .../riskProfile/p2.png                      | Bin
 .../riskProfile/p3.png                      | Bin
 .../riskProfile/p4.png                      | Bin
 .../riskProfile/riskProfile-empty.png       | Bin
 .../riskProfile/riskProfile.png             | Bin
 .../riskProfile/riskProfile_prev_best.png   | Bin
 .../riskProfile/riskProfile_rev_order.png   | Bin
 .../riskProfile/show_arguments.png          | Bin
 .../riskProfile/single_pv.png               | Bin
 .../riskProfile/udf.png                     | Bin
 .../sensSpec/sensSpec.png                   | Bin
 .../sensSpec/sensSpec2.png                  | Bin
 .../sensSpec/sensSpec3.png                  | Bin
 .../sensSpec/sensSpec4.png                  | Bin
 .../sensSpec/sensSpec5.png                  | Bin
 .../sensSpec/sensSpec6.png                  | Bin
 tests/testthat/test-calibrationProfile.R    |  46 +++--
 tests/testthat/test-getEstimates.R          | 166 ++++++++++--------
 tests/testthat/test-riskProfile.R           |  25 +--
 tests/testthat/test-sensSpec.R              |  14 +-
 tests/testthat/test-utils.R                 |  47 +++--
 35 files changed, 344 insertions(+), 206 deletions(-)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/calibrationProfile/p1.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/calibrationProfile/p2.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/calibrationProfile/p3.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/calibrationProfile/p5.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/calibrationProfile/p6.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/calibrationProfile/p7.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/calibrationProfile/udf.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/riskProfile/cgam.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/riskProfile/compare_pc.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/riskProfile/errorbar.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/riskProfile/p1.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/riskProfile/p2.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/riskProfile/p3.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/riskProfile/p4.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/riskProfile/riskProfile-empty.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/riskProfile/riskProfile.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/riskProfile/riskProfile_prev_best.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/riskProfile/riskProfile_rev_order.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/riskProfile/show_arguments.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/riskProfile/single_pv.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/riskProfile/udf.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/sensSpec/sensSpec.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/sensSpec/sensSpec2.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/sensSpec/sensSpec3.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/sensSpec/sensSpec4.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/sensSpec/sensSpec5.png (100%)
 rename tests/testthat/{_snaps => _snaps_graphs_backup}/sensSpec/sensSpec6.png (100%)

diff --git a/R/utils.R b/R/utils.R
index b7ab0ad..51cb3e3 100644
--- a/R/utils.R
+++ b/R/utils.R
@@ -318,10 +318,10 @@ add0thPercPV <- function(x) {
 #'   expect_snapshot_file(save_png(ggplot(mtcars) +
 #'     geom_point(aes(hp, mpg))), "riskProfile.png")
 #'
-save_png <- function(code, width = 400, height = 400) {
+save_png <- function(code, width = 400, height = 400) { # nocov start
   path <- tempfile(fileext = ".png")
   png(path, width = width, height = height)
   on.exit(dev.off())
   print(code)
   return(path)
-}
+} # nocov end
diff --git a/tests/testthat/_snaps/calibrationProfile.md b/tests/testthat/_snaps/calibrationProfile.md
index 5f04274..3a892e7 100644
--- a/tests/testthat/_snaps/calibrationProfile.md
+++ b/tests/testthat/_snaps/calibrationProfile.md
@@ -53,6 +53,61 @@
       outcome     score percentile                   method
     1     0.5 0.5098193      0.475 Calibration In The Large
 
+---
+
+    Code
+      as.data.frame(out2$data)
+    Output
+       method       score percentile outcome  estimate
+    1     gam -0.70851973      0.025       0 0.2280920
+    2     gam -0.31936674      0.050       1 0.3036047
+    3     gam -0.28965963      0.075       0 0.3099183
+    4     gam -0.26854024      0.100       0 0.3144504
+    5     gam -0.21579546      0.125       0 0.3259235
+    6     gam -0.12694900      0.150       1 0.3457244
+    7     gam -0.04228831      0.175       0 0.3651047
+    8     gam  0.01090609      0.200       0 0.3775141
+    9     gam  0.09726820      0.225       0 0.3980008
+   10     gam  0.16686510      0.250       0 0.4147783
+   11     gam  0.20302153      0.275       0 0.4235757
+   12     gam  0.21288369      0.300       1 0.4259840
+   13     gam  0.22782199      0.325       1 0.4296386
+   14     gam  0.25651524      0.350       1 0.4366793
+   15     gam  0.29597482      0.375       1 0.4464032
+   16     gam  0.34292989      0.400       1 0.4580272
+   17     gam  0.34696534      0.425       0 0.4590285
+   18     gam  0.37097378      0.450       0 0.4649923
+   19     gam  0.42893547      0.475       0 0.4794282
+   20     gam  0.53022892      0.500       1 0.5047243
+   21     gam  0.54408209      0.525       1 0.5081849
+   22     gam  0.59036428      0.550       0 0.5197388
+   23     gam  0.64951309      0.575       1 0.5344723
+   24     gam  0.71786211      0.600       0 0.5514213
+   25     gam  0.71962806      0.625       1 0.5518578
+   26     gam  0.76480480      0.650       1 0.5629956
+   27     gam  0.78613501      0.675       1 0.5682330
+   28     gam  0.84226992      0.700       1 0.5819407
+   29     gam  0.84606645      0.725       0 0.5828634
+   30     gam  0.84852363      0.750       0 0.5834604
+   31     gam  0.90794320      0.775       1 0.5978169
+   32     gam  0.92231184      0.800       1 0.6012645
+   33     gam  1.02084053      0.825       0 0.6246197
+   34     gam  1.02111683      0.850       1 0.6246845
+   35     gam  1.04822764      0.875       0 0.6310149
+   36     gam  1.12511740      0.900       0 0.6487194
+   37     gam  1.27747068      0.925       1 0.6825841
+   38     gam  1.32986226      0.950       0 0.6938183
+   39     gam  1.35878197      0.975       1 0.6999234
+   40     gam  1.55167777      1.000       1 0.7387937
+
+---
+
+    Code
+      out2$citl
+    Output
+      outcome     score percentile                   method
+    1     0.5 0.5098193      0.475 Calibration In The Large
+
 ---
 
     Code
@@ -108,6 +163,61 @@
       outcome      score percentile                   method
     1     0.5 -0.5098193      0.525 Calibration In The Large
 
+---
+
+    Code
+      as.data.frame(out5$data)
+    Output
+       method       score percentile outcome  estimate
+    1     gam -0.70851973      0.025       0 0.2280920
+    2     gam -0.31936674      0.050       1 0.3036047
+    3     gam -0.28965963      0.075       0 0.3099183
+    4     gam -0.26854024      0.100       0 0.3144504
+    5     gam -0.21579546      0.125       0 0.3259235
+    6     gam -0.12694900      0.150       1 0.3457244
+    7     gam -0.04228831      0.175       0 0.3651047
+    8     gam  0.01090609      0.200       0 0.3775141
+    9     gam  0.09726820      0.225       0 0.3980008
+   10     gam  0.16686510      0.250       0 0.4147783
+   11     gam  0.20302153      0.275       0 0.4235757
+   12     gam  0.21288369      0.300       1 0.4259840
+   13     gam  0.22782199      0.325       1 0.4296386
+   14     gam  0.25651524      0.350       1 0.4366793
+   15     gam  0.29597482      0.375       1 0.4464032
+   16     gam  0.34292989      0.400       1 0.4580272
+   17     gam  0.34696534      0.425       0 0.4590285
+   18     gam  0.37097378      0.450       0 0.4649923
+   19     gam  0.42893547      0.475       0 0.4794282
+   20     gam  0.53022892      0.500       1 0.5047243
+   21     gam  0.54408209      0.525       1 0.5081849
+   22     gam  0.59036428      0.550       0 0.5197388
+   23     gam  0.64951309      0.575       1 0.5344723
+   24     gam  0.71786211      0.600       0 0.5514213
+   25     gam  0.71962806      0.625       1 0.5518578
+   26     gam  0.76480480      0.650       1 0.5629956
+   27     gam  0.78613501      0.675       1 0.5682330
+   28     gam  0.84226992      0.700       1 0.5819407
+   29     gam  0.84606645      0.725       0 0.5828634
+   30     gam  0.84852363      0.750       0 0.5834604
+   31     gam  0.90794320      0.775       1 0.5978169
+   32     gam  0.92231184      0.800       1 0.6012645
+   33     gam  1.02084053      0.825       0 0.6246197
+   34     gam  1.02111683      0.850       1 0.6246845
+   35     gam  1.04822764      0.875       0 0.6310149
+   36     gam  1.12511740      0.900       0 0.6487194
+   37     gam  1.27747068      0.925       1 0.6825841
+   38     gam  1.32986226      0.950       0 0.6938183
+   39     gam  1.35878197      0.975       1 0.6999234
+   40     gam  1.55167777      1.000       1 0.7387937
+
+---
+
+    Code
+      out5$citl
+    Output
+      outcome     score percentile                   method
+    1     0.5 0.5098193      0.475 Calibration In The Large
+
 ---
 
     Code
diff --git a/tests/testthat/_snaps/getEstimates.md b/tests/testthat/_snaps/getEstimates.md
index 08511f2..a392815 100644
--- a/tests/testthat/_snaps/getEstimates.md
+++ b/tests/testthat/_snaps/getEstimates.md
@@ -238,7 +238,7 @@
 # getGAMest returns estimates of correct dimensions
 
     Code
-      res
+      res1
     Output
            score percentile outcome  estimate
     1 1.55167777      1.000       1 0.6833384
@@ -285,7 +285,7 @@
 ---
 
     Code
-      res
+      res2
     Output
            score percentile outcome  estimate
     1 1.55167777      1.000       1 0.7387937
@@ -332,7 +332,7 @@
 ---
 
     Code
-      res
+      res3
     Output
            score percentile outcome  estimate
     1 1.55167777      1.000       1 0.6354516
@@ -379,7 +379,7 @@
 ---
 
    Code
-      res
+      res4
     Output
            score percentile outcome  estimate
     1 1.55167777 1.00000000       1 0.6869731
@@ -419,7 +419,7 @@
 # getCGAMest works
 
     Code
-      res
+      res1
     Output
            score percentile outcome  estimate
     1 1.55167777      1.000       1 0.6899800
@@ -466,7 +466,7 @@
 ---
 
    Code
-      res
+      res2
     Output
            score percentile outcome  estimate
     1 1.55167777      1.000       1 0.8173842
@@ -513,7 +513,7 @@
 # getMSPLINEest returns estimates of correct dimensions
 
    Code
-      res
+      res1
     Output
            score percentile outcome  estimate
     1 1.55167777      1.000       1 0.6865854
@@ -560,7 +560,7 @@
 ---
 
    Code
-      res
+      res2
     Output
            score percentile outcome  estimate
     1 1.55167777      1.000       1 0.7473018
@@ -1343,6 +1343,67 @@
     7      0.900
     8      1.000
 
+---
+
+    Code
+      out$obslvl
+    Output
+       outcome       score riskpercentile bin   interval  min  max
+    1        0 -0.70851973          0.025   1    [0,0.4]    0  0.4
+    2        1 -0.31936674          0.050   1    [0,0.4]    0  0.4
+    3        0 -0.28965963          0.075   1    [0,0.4]    0  0.4
+    4        0 -0.26854024          0.100   1    [0,0.4]    0  0.4
+    5        0 -0.21579546          0.125   1    [0,0.4]    0  0.4
+    6        1 -0.12694900          0.150   1    [0,0.4]    0  0.4
+    7        0 -0.04228831          0.175   1    [0,0.4]    0  0.4
+    8        0  0.01090609          0.200   1    [0,0.4]    0  0.4
+    9        0  0.09726820          0.225   1    [0,0.4]    0  0.4
+   10        0  0.16686510          0.250   1    [0,0.4]    0  0.4
+   11        0  0.20302153          0.275   1    [0,0.4]    0  0.4
+   12        1  0.21288369          0.300   1    [0,0.4]    0  0.4
+   13        1  0.22782199          0.325   1    [0,0.4]    0  0.4
+   14        1  0.25651524          0.350   1    [0,0.4]    0  0.4
+   15        1  0.29597482          0.375   1    [0,0.4]    0  0.4
+   16        1  0.34292989          0.400   1    [0,0.4]    0  0.4
+   17        0  0.34696534          0.425   2 (0.4,0.75]  0.4 0.75
+   18        0  0.37097378          0.450   2 (0.4,0.75]  0.4 0.75
+   19        0  0.42893547          0.475   2 (0.4,0.75]  0.4 0.75
+   20        1  0.53022892          0.500   2 (0.4,0.75]  0.4 0.75
+   21        1  0.54408209          0.525   2 (0.4,0.75]  0.4 0.75
+   22        0  0.59036428          0.550   2 (0.4,0.75]  0.4 0.75
+   23        1  0.64951309          0.575   2 (0.4,0.75]  0.4 0.75
+   24        0  0.71786211          0.600   2 (0.4,0.75]  0.4 0.75
+   25        1  0.71962806          0.625   2 (0.4,0.75]  0.4 0.75
+   26        1  0.76480480          0.650   2 (0.4,0.75]  0.4 0.75
+   27        1  0.78613501          0.675   2 (0.4,0.75]  0.4 0.75
+   28        1  0.84226992          0.700   2 (0.4,0.75]  0.4 0.75
+   29        0  0.84606645          0.725   2 (0.4,0.75]  0.4 0.75
+   30        0  0.84852363          0.750   2 (0.4,0.75]  0.4 0.75
+   31        1  0.90794320          0.775   3   (0.75,1] 0.75    1
+   32        1  0.92231184          0.800   3   (0.75,1] 0.75    1
+   33        0  1.02084053          0.825   3   (0.75,1] 0.75    1
+   34        1  1.02111683          0.850   3   (0.75,1] 0.75    1
+   35        0  1.04822764          0.875   3   (0.75,1] 0.75    1
+   36        0  1.12511740          0.900   3   (0.75,1] 0.75    1
+   37        1  1.27747068          0.925   3   (0.75,1] 0.75    1
+   38        0  1.32986226          0.950   3   (0.75,1] 0.75    1
+   39        1  1.35878197          0.975   3   (0.75,1] 0.75    1
+   40        1  1.55167777          1.000   3   (0.75,1] 0.75    1
+
+---
+
+    Code
+      as.data.frame(out$binlvl)
+    Output
+      bin   interval  n events avg.outcome sd.outcome     avg.risk   sd.risk
+    1   1    [0,0.4] 16      7      0.4375  0.5123475 -0.009808285 0.2909537
+    2   2 (0.4,0.75] 14      7      0.5000  0.5188745  0.641882355 0.1760115
+    3   3   (0.75,1] 10      6      0.6000  0.5163978  1.156335012 0.2128735
+      riskpercentile
+    1           0.40
+    2           0.75
+    3           1.00
+
 ---
 
     Code
@@ -1414,64 +1475,3 @@
     7      0.900
     8      1.000
 
----
-
-    Code
-      out$obslvl
-    Output
-       outcome       score riskpercentile bin   interval  min  max
-    1        0 -0.70851973          0.025   1    [0,0.4]    0  0.4
-    2        1 -0.31936674          0.050   1    [0,0.4]    0  0.4
-    3        0 -0.28965963          0.075   1    [0,0.4]    0  0.4
-    4        0 -0.26854024          0.100   1    [0,0.4]    0  0.4
-    5        0 -0.21579546          0.125   1    [0,0.4]    0  0.4
-    6        1 -0.12694900          0.150   1    [0,0.4]    0  0.4
-    7        0 -0.04228831          0.175   1    [0,0.4]    0  0.4
-    8        0  0.01090609          0.200   1    [0,0.4]    0  0.4
-    9        0  0.09726820          0.225   1    [0,0.4]    0  0.4
-   10        0  0.16686510          0.250   1    [0,0.4]    0  0.4
-   11        0  0.20302153          0.275   1    [0,0.4]    0  0.4
-   12        1  0.21288369          0.300   1    [0,0.4]    0  0.4
-   13        1  0.22782199          0.325   1    [0,0.4]    0  0.4
-   14        1  0.25651524          0.350   1    [0,0.4]    0  0.4
-   15        1  0.29597482          0.375   1    [0,0.4]    0  0.4
-   16        1  0.34292989          0.400   1    [0,0.4]    0  0.4
-   17        0  0.34696534          0.425   2 (0.4,0.75]  0.4 0.75
-   18        0  0.37097378          0.450   2 (0.4,0.75]  0.4 0.75
-   19        0  0.42893547          0.475   2 (0.4,0.75]  0.4 0.75
-   20        1  0.53022892          0.500   2 (0.4,0.75]  0.4 0.75
-   21        1  0.54408209          0.525   2 (0.4,0.75]  0.4 0.75
-   22        0  0.59036428          0.550   2 (0.4,0.75]  0.4 0.75
-   23        1  0.64951309          0.575   2 (0.4,0.75]  0.4 0.75
-   24        0  0.71786211          0.600   2 (0.4,0.75]  0.4 0.75
-   25        1  0.71962806          0.625   2 (0.4,0.75]  0.4 0.75
-   26        1  0.76480480          0.650   2 (0.4,0.75]  0.4 0.75
-   27        1  0.78613501          0.675   2 (0.4,0.75]  0.4 0.75
-   28        1  0.84226992          0.700   2 (0.4,0.75]  0.4 0.75
-   29        0  0.84606645          0.725   2 (0.4,0.75]  0.4 0.75
-   30        0  0.84852363          0.750   2 (0.4,0.75]  0.4 0.75
-   31        1  0.90794320          0.775   3   (0.75,1] 0.75    1
-   32        1  0.92231184          0.800   3   (0.75,1] 0.75    1
-   33        0  1.02084053          0.825   3   (0.75,1] 0.75    1
-   34        1  1.02111683          0.850   3   (0.75,1] 0.75    1
-   35        0  1.04822764          0.875   3   (0.75,1] 0.75    1
-   36        0  1.12511740          0.900   3   (0.75,1] 0.75    1
-   37        1  1.27747068          0.925   3   (0.75,1] 0.75    1
-   38        0  1.32986226          0.950   3   (0.75,1] 0.75    1
-   39        1  1.35878197          0.975   3   (0.75,1] 0.75    1
-   40        1  1.55167777          1.000   3   (0.75,1] 0.75    1
-
----
-
-    Code
-      as.data.frame(out$binlvl)
-    Output
-      bin   interval  n events avg.outcome sd.outcome     avg.risk   sd.risk
-    1   1    [0,0.4] 16      7      0.4375  0.5123475 -0.009808285 0.2909537
-    2   2 (0.4,0.75] 14      7      0.5000  0.5188745  0.641882355 0.1760115
-    3   3   (0.75,1] 10      6      0.6000  0.5163978  1.156335012 0.2128735
-      riskpercentile
-    1           0.40
-    2           0.75
-    3           1.00
-
diff --git a/tests/testthat/_snaps/calibrationProfile/p1.png b/tests/testthat/_snaps_graphs_backup/calibrationProfile/p1.png
similarity index 100%
rename from tests/testthat/_snaps/calibrationProfile/p1.png
rename to tests/testthat/_snaps_graphs_backup/calibrationProfile/p1.png
diff --git a/tests/testthat/_snaps/calibrationProfile/p2.png b/tests/testthat/_snaps_graphs_backup/calibrationProfile/p2.png
similarity index 100%
rename from tests/testthat/_snaps/calibrationProfile/p2.png
rename to tests/testthat/_snaps_graphs_backup/calibrationProfile/p2.png
diff --git a/tests/testthat/_snaps/calibrationProfile/p3.png b/tests/testthat/_snaps_graphs_backup/calibrationProfile/p3.png
similarity index 100%
rename from tests/testthat/_snaps/calibrationProfile/p3.png
rename to tests/testthat/_snaps_graphs_backup/calibrationProfile/p3.png
diff --git a/tests/testthat/_snaps/calibrationProfile/p5.png b/tests/testthat/_snaps_graphs_backup/calibrationProfile/p5.png
similarity index 100%
rename from tests/testthat/_snaps/calibrationProfile/p5.png
rename to tests/testthat/_snaps_graphs_backup/calibrationProfile/p5.png
diff --git a/tests/testthat/_snaps/calibrationProfile/p6.png b/tests/testthat/_snaps_graphs_backup/calibrationProfile/p6.png
similarity index 100%
rename from tests/testthat/_snaps/calibrationProfile/p6.png
rename to tests/testthat/_snaps_graphs_backup/calibrationProfile/p6.png
diff --git a/tests/testthat/_snaps/calibrationProfile/p7.png b/tests/testthat/_snaps_graphs_backup/calibrationProfile/p7.png
similarity index 100%
rename from tests/testthat/_snaps/calibrationProfile/p7.png
rename to tests/testthat/_snaps_graphs_backup/calibrationProfile/p7.png
diff --git a/tests/testthat/_snaps/calibrationProfile/udf.png b/tests/testthat/_snaps_graphs_backup/calibrationProfile/udf.png
similarity index 100%
rename from tests/testthat/_snaps/calibrationProfile/udf.png
rename to tests/testthat/_snaps_graphs_backup/calibrationProfile/udf.png
diff --git a/tests/testthat/_snaps/riskProfile/cgam.png b/tests/testthat/_snaps_graphs_backup/riskProfile/cgam.png
similarity index 100%
rename from tests/testthat/_snaps/riskProfile/cgam.png
rename to tests/testthat/_snaps_graphs_backup/riskProfile/cgam.png
diff --git a/tests/testthat/_snaps/riskProfile/compare_pc.png b/tests/testthat/_snaps_graphs_backup/riskProfile/compare_pc.png
similarity index 100%
rename from tests/testthat/_snaps/riskProfile/compare_pc.png
rename to tests/testthat/_snaps_graphs_backup/riskProfile/compare_pc.png
diff --git a/tests/testthat/_snaps/riskProfile/errorbar.png b/tests/testthat/_snaps_graphs_backup/riskProfile/errorbar.png
similarity index 100%
rename from tests/testthat/_snaps/riskProfile/errorbar.png
rename to tests/testthat/_snaps_graphs_backup/riskProfile/errorbar.png
diff --git a/tests/testthat/_snaps/riskProfile/p1.png b/tests/testthat/_snaps_graphs_backup/riskProfile/p1.png
similarity index 100%
rename from tests/testthat/_snaps/riskProfile/p1.png
rename to tests/testthat/_snaps_graphs_backup/riskProfile/p1.png
diff --git a/tests/testthat/_snaps/riskProfile/p2.png b/tests/testthat/_snaps_graphs_backup/riskProfile/p2.png
similarity index 100%
rename from tests/testthat/_snaps/riskProfile/p2.png
rename to tests/testthat/_snaps_graphs_backup/riskProfile/p2.png
diff --git a/tests/testthat/_snaps/riskProfile/p3.png b/tests/testthat/_snaps_graphs_backup/riskProfile/p3.png
similarity index 100%
rename from tests/testthat/_snaps/riskProfile/p3.png
rename to tests/testthat/_snaps_graphs_backup/riskProfile/p3.png
diff --git a/tests/testthat/_snaps/riskProfile/p4.png b/tests/testthat/_snaps_graphs_backup/riskProfile/p4.png
similarity index 100%
rename from tests/testthat/_snaps/riskProfile/p4.png
rename to tests/testthat/_snaps_graphs_backup/riskProfile/p4.png
diff --git a/tests/testthat/_snaps/riskProfile/riskProfile-empty.png b/tests/testthat/_snaps_graphs_backup/riskProfile/riskProfile-empty.png
similarity index 100%
rename from tests/testthat/_snaps/riskProfile/riskProfile-empty.png
rename to tests/testthat/_snaps_graphs_backup/riskProfile/riskProfile-empty.png
diff --git a/tests/testthat/_snaps/riskProfile/riskProfile.png b/tests/testthat/_snaps_graphs_backup/riskProfile/riskProfile.png
similarity index 100%
rename from tests/testthat/_snaps/riskProfile/riskProfile.png
rename to tests/testthat/_snaps_graphs_backup/riskProfile/riskProfile.png
diff --git a/tests/testthat/_snaps/riskProfile/riskProfile_prev_best.png b/tests/testthat/_snaps_graphs_backup/riskProfile/riskProfile_prev_best.png
similarity index 100%
rename from tests/testthat/_snaps/riskProfile/riskProfile_prev_best.png
rename to tests/testthat/_snaps_graphs_backup/riskProfile/riskProfile_prev_best.png
diff --git a/tests/testthat/_snaps/riskProfile/riskProfile_rev_order.png b/tests/testthat/_snaps_graphs_backup/riskProfile/riskProfile_rev_order.png
similarity index 100%
rename from tests/testthat/_snaps/riskProfile/riskProfile_rev_order.png
rename to tests/testthat/_snaps_graphs_backup/riskProfile/riskProfile_rev_order.png
diff --git a/tests/testthat/_snaps/riskProfile/show_arguments.png b/tests/testthat/_snaps_graphs_backup/riskProfile/show_arguments.png
similarity index 100%
rename from tests/testthat/_snaps/riskProfile/show_arguments.png
rename to tests/testthat/_snaps_graphs_backup/riskProfile/show_arguments.png
diff --git a/tests/testthat/_snaps/riskProfile/single_pv.png b/tests/testthat/_snaps_graphs_backup/riskProfile/single_pv.png
similarity index 100%
rename from tests/testthat/_snaps/riskProfile/single_pv.png
rename to tests/testthat/_snaps_graphs_backup/riskProfile/single_pv.png
diff --git a/tests/testthat/_snaps/riskProfile/udf.png b/tests/testthat/_snaps_graphs_backup/riskProfile/udf.png
similarity index 100%
rename from tests/testthat/_snaps/riskProfile/udf.png
rename to tests/testthat/_snaps_graphs_backup/riskProfile/udf.png
diff --git a/tests/testthat/_snaps/sensSpec/sensSpec.png b/tests/testthat/_snaps_graphs_backup/sensSpec/sensSpec.png
similarity index 100%
rename from tests/testthat/_snaps/sensSpec/sensSpec.png
rename to tests/testthat/_snaps_graphs_backup/sensSpec/sensSpec.png
diff --git a/tests/testthat/_snaps/sensSpec/sensSpec2.png b/tests/testthat/_snaps_graphs_backup/sensSpec/sensSpec2.png
similarity index 100%
rename from tests/testthat/_snaps/sensSpec/sensSpec2.png
rename to tests/testthat/_snaps_graphs_backup/sensSpec/sensSpec2.png
diff --git a/tests/testthat/_snaps/sensSpec/sensSpec3.png b/tests/testthat/_snaps_graphs_backup/sensSpec/sensSpec3.png
similarity index 100%
rename from tests/testthat/_snaps/sensSpec/sensSpec3.png
rename to tests/testthat/_snaps_graphs_backup/sensSpec/sensSpec3.png
diff --git a/tests/testthat/_snaps/sensSpec/sensSpec4.png b/tests/testthat/_snaps_graphs_backup/sensSpec/sensSpec4.png
similarity index 100%
rename from tests/testthat/_snaps/sensSpec/sensSpec4.png
rename to tests/testthat/_snaps_graphs_backup/sensSpec/sensSpec4.png
diff --git a/tests/testthat/_snaps/sensSpec/sensSpec5.png b/tests/testthat/_snaps_graphs_backup/sensSpec/sensSpec5.png
similarity index 100%
rename from tests/testthat/_snaps/sensSpec/sensSpec5.png
rename to tests/testthat/_snaps_graphs_backup/sensSpec/sensSpec5.png
diff --git a/tests/testthat/_snaps/sensSpec/sensSpec6.png b/tests/testthat/_snaps_graphs_backup/sensSpec/sensSpec6.png
similarity index 100%
rename from tests/testthat/_snaps/sensSpec/sensSpec6.png
rename to tests/testthat/_snaps_graphs_backup/sensSpec/sensSpec6.png
diff --git a/tests/testthat/test-calibrationProfile.R b/tests/testthat/test-calibrationProfile.R
index d6f2fdb..1bb077d 100644
--- a/tests/testthat/test-calibrationProfile.R
+++ b/tests/testthat/test-calibrationProfile.R
@@ -3,46 +3,56 @@ set.seed(3423)
 rscore <- rnorm(40, 0.5, 0.5)
 
 test_that("calibrationProfile works", {
+
+  expect_error(
+    calibrationProfile(outcome = outcome, score = rscore, methods = "asis"),
+    "not suitable"
+  )
+
   out1 <- calibrationProfile(outcome = outcome, score = rscore)
-  expect_snapshot(as.data.frame(out1$data))
-  expect_snapshot(out1$citl)
-
   out2 <- calibrationProfile(outcome = outcome, score = rscore, plot.raw = FALSE)
   out3 <- calibrationProfile(outcome = outcome, score = rscore, rev.order = TRUE)
-  expect_snapshot(as.data.frame(out3$data))
-  expect_snapshot(out3$citl)
-
+
   out4 <- calibrationProfile(
     outcome = outcome,
     score = rscore,
     margin.type = "histogram",
     bins = 50
   )
-  expect_s3_class(out4$plot, "gtable")
-
+
   out5 <- calibrationProfile(
     outcome = outcome,
     score = rscore,
     include = c("citl", "datapoints", "rug", "loess")
   )
-
+
   out6 <- calibrationProfile(
     outcome = outcome,
     score = rscore,
     methods = c("BINNED", "PAVA", "MSPLINE", "gam")
   )
-  expect_snapshot(as.data.frame(out6$data))
-  expect_snapshot(out6$citl)
-
+
   out7 <- calibrationProfile(
     outcome = outcome,
     score = rscore,
     methods = "gam",
     include = NULL
   )
+
+  expect_snapshot(as.data.frame(out1$data))
+  expect_snapshot(out1$citl)
+
+  expect_snapshot(as.data.frame(out2$data))
+  expect_snapshot(out2$citl)
+
+  expect_snapshot(as.data.frame(out3$data))
+  expect_snapshot(out3$citl)
+
+  expect_s3_class(out4$plot, "gtable")
+
+  expect_snapshot(as.data.frame(out5$data))
+  expect_snapshot(out5$citl)
+
+  expect_snapshot(as.data.frame(out6$data))
+  expect_snapshot(out6$citl)
+
   expect_snapshot(as.data.frame(out7$data))
   expect_snapshot(out7$citl)
 
-  expect_error(
-    calibrationProfile(outcome = outcome, score = rscore, methods = "asis"),
-    "not suitable"
-  )
-
   # expect_snapshot_file(save_png(out1$plot), "p1.png")
   # expect_snapshot_file(save_png(out2$plot), "p2.png")
   # expect_snapshot_file(save_png(out3$plot), "p3.png")
diff --git a/tests/testthat/test-getEstimates.R b/tests/testthat/test-getEstimates.R
index 8d6bc05..36d7bce 100644
--- a/tests/testthat/test-getEstimates.R
+++ b/tests/testthat/test-getEstimates.R
@@ -85,44 +85,100 @@ test_that("getPAVAest returns estimates of correct dimensions", {
 })
 
 test_that("getGAMest returns estimates of correct dimensions", {
-  res <- getGAMest(outcome, rscore)
-  expect_equal(nrow(res), length(rscore))
-  expect_equal(ncol(res), 4)
-  expect_snapshot(res)
+
+  res1 <- getGAMest(outcome, rscore)
+  expect_equal(nrow(res1), length(rscore))
+  expect_equal(ncol(res1), 4)
 
-  res <- getGAMest(outcome, rscore, fitonPerc = FALSE)
-  expect_snapshot(res)
+  res2 <- getGAMest(outcome, rscore, fitonPerc = FALSE)
+  res3 <- getGAMest(outcome, rscore, logscores = TRUE)
+  res4 <- getGAMest(outcome[rscore > 0], rscore[rscore > 0], logscores = TRUE, fitonPerc = FALSE)
 
-  res <- getGAMest(outcome, rscore, logscores = TRUE)
-  expect_snapshot(res)
+  expect_snapshot(res1)
+  expect_snapshot(res2)
+  expect_snapshot(res3)
+  expect_snapshot(res4)
 
-  res <- getGAMest(outcome[rscore > 0], rscore[rscore > 0], logscores = TRUE, fitonPerc = FALSE)
-  expect_snapshot(res)
+
 })
 
 test_that("getCGAMest works", {
-  res <- getCGAMest(outcome, rscore)
-  expect_equal(nrow(res), length(rscore))
-  expect_equal(ncol(res), 4)
-  expect_snapshot(res)
+
+  res1 <- getCGAMest(outcome, rscore)
+  expect_equal(nrow(res1), length(rscore))
+  expect_equal(ncol(res1), 4)
 
-  res <- getCGAMest(outcome, rscore, fitonPerc = FALSE)
-  expect_snapshot(res)
+  res2 <- getCGAMest(outcome, rscore, fitonPerc = FALSE)
+
+  expect_snapshot(res1)
+  expect_snapshot(res2)
 })
 
 test_that("getMSPLINEest returns estimates of correct dimensions", {
-  res <- getMSPLINEest(outcome, rscore)
-  expect_equal(nrow(res), length(rscore))
-  expect_equal(ncol(res), 4)
-  expect_snapshot(res)
+
+  res1 <- getMSPLINEest(outcome, rscore)
+
+  expect_equal(nrow(res1), length(rscore))
+  expect_equal(ncol(res1), 4)
 
-  res <- getMSPLINEest(outcome, rscore, fitonPerc = FALSE)
-  expect_snapshot(res)
+  res2 <- getMSPLINEest(outcome, rscore, fitonPerc = FALSE)
+
+  expect_snapshot(res1)
+  expect_snapshot(res2)
 })
 
 test_that("getEsts returns estimates of correct dimensions and correct number of methods", {
+
+  expect_error(
+    getEsts(
+      methods = list(my_est = function(outcome, score) {"dummy"}),
+      outcome = outcome,
+      score = rscore
+    ),
+    "data.frame of 4 columns"
+  )
+
+  expect_error(
+    getEsts(
+      methods = list(my_est = function(outcome, score) {
+        data.frame(score = "a", percentile = "b", outcome = "c", estimate = 0)
+      }),
+      outcome = outcome,
+      score = rscore
+    ),
+    "not numeric"
+  )
+
+  expect_error(
+    getEsts(methods = methodCheck("dummy"), outcome = outcome, score = score),
+    "is not yet available"
+  )
+
+  expect_error(
+    getEsts(
+      methods = methodCheck(list(dummy = list(method = "dummy"))),
+      outcome = outcome, score = rscore
+    ),
+    "is not yet available"
+  )
+
+  expect_error(
+    getEsts(
+      methods = methodCheck(list(user_gam = function(x) {})),
+      outcome = outcome, score = rscore
+    ),
+    "exactly two arguments"
+  )
+
+  expect_error(
+    getEsts(
+      methods = methodCheck(list(asis1 = list(method = "asis"), asis2 = list(method = "asis"))),
+      outcome = outcome, score = rscore
+    ),
+    "just once"
+  )
+
   res <- getEsts(methods = list(GAM = list(method = "gam")), outcome = outcome, score = rscore)
 
   expect_equal(nrow(res$plotdata), length(rscore))
@@ -195,54 +251,6 @@ test_that("getEsts returns estimates of correct dimensions and correct number of
   expect_equal(res$idx.binned, c("g" = F, "b" = T, "r" = F))
   expect_equal(res$idx.pava, c("g" = F, "b" = F, "r" = F))
 
-  expect_error(
-    getEsts(
-      methods = list(my_est = function(outcome, score) {"dummy"}),
-      outcome = outcome,
-      score = rscore
-    ),
-    "data.frame of 4 columns"
-  )
-
-  expect_error(
-    getEsts(
-      methods = list(my_est = function(outcome, score) {
-        data.frame(score = "a", percentile = "b", outcome = "c", estimate = 0)
-      }),
-      outcome = outcome,
-      score = rscore
-    ),
-    "not numeric"
-  )
-
-  expect_error(
-    getEsts(methods = methodCheck("dummy"), outcome = outcome, score = score),
-    "is not yet available"
-  )
-
-  expect_error(
-    getEsts(
-      methods = methodCheck(list(dummy = list(method = "dummy"))),
-      outcome = outcome, score = rscore
-    ),
-    "is not yet available"
-  )
-
-  expect_error(
-    getEsts(
-      methods = methodCheck(list(user_gam = function(x) {})),
-      outcome = outcome, score = rscore
-    ),
-    "exactly two arguments"
-  )
-
-  expect_error(
-    getEsts(
-      methods = methodCheck(list(asis1 = list(method = "asis"), asis2 = list(method = "asis"))),
-      outcome = outcome, score = rscore
-    ),
-    "just once"
-  )
-
 })
 
 test_that("asis method works in getEsts", {
@@ -300,6 +308,12 @@ test_that("getConstraints works", {
test_that("summaryTable argument returns appropriately sized dataframe when requested.", { + + expect_error( + getSummaries(outcome, rscore, bins = numeric(0), quantiles = NULL), + "Unrecognized option" + ) + expect_equal(nrow(getSummaries(outcome, rscore, quantiles = 10)$binlvl), 10) expect_equal(ncol(getSummaries(outcome, rscore, quantiles = 10)$binlvl), 9) @@ -320,18 +334,14 @@ test_that("summaryTable argument returns appropriately sized dataframe when requ expect_snapshot(out$obslvl) expect_snapshot(as.data.frame(out$binlvl)) - # test right - out <- getSummaries(outcome, rscore, bins = 8, right = FALSE) + # test bins - 2 + out <- getSummaries(outcome, rscore, bins = c(0, 0.4, 0.75, 1)) expect_snapshot(out$obslvl) expect_snapshot(as.data.frame(out$binlvl)) - # test bins - 2 - out <- getSummaries(outcome, rscore, bins = c(0, 0.4, 0.75, 1)) + # test right + out <- getSummaries(outcome, rscore, bins = 8, right = FALSE) expect_snapshot(out$obslvl) expect_snapshot(as.data.frame(out$binlvl)) - expect_error( - getSummaries(outcome, rscore, bins = numeric(0), quantiles = NULL), - "Unrecognized option" - ) }) diff --git a/tests/testthat/test-riskProfile.R b/tests/testthat/test-riskProfile.R index 46f8566..f1bbae7 100644 --- a/tests/testthat/test-riskProfile.R +++ b/tests/testthat/test-riskProfile.R @@ -3,26 +3,21 @@ set.seed(3423) rscore <- rnorm(40, 0.5, 0.5) test_that("riskProfile works", { + out1 <- riskProfile( outcome = outcome, score = rscore, methods = c("MSPLINE", "GAM", "BINNED", "PAVA"), rev.order = FALSE ) - - expect_snapshot(as.data.frame(out1$data)) - expect_null(out1$errorbar) - + out2 <- riskProfile( outcome = outcome, score = rscore, methods = c("MSPLINE", "GAM", "BINNED", "PAVA"), rev.order = TRUE ) - - expect_snapshot(as.data.frame(out2$data)) - expect_null(out2$errorbar) - + out3 <- riskProfile( outcome = outcome, score = rscore, @@ -30,10 +25,7 @@ test_that("riskProfile works", { rev.order = FALSE, prev.adj = 0.37 ) - - expect_snapshot(as.data.frame(out3$data)) - expect_null(out3$errorbar) - + out4 <- riskProfile( outcome = outcome, score = rscore, @@ -42,6 +34,15 @@ test_that("riskProfile works", { show.nonparam.pv = FALSE, include = c("PPV", "NPV", "1-NPV") ) + + expect_snapshot(as.data.frame(out1$data)) + expect_null(out1$errorbar) + + expect_snapshot(as.data.frame(out2$data)) + expect_null(out2$errorbar) + + expect_snapshot(as.data.frame(out3$data)) + expect_null(out3$errorbar) expect_equal(nrow(out4$data), 246) expect_null(out4$errorbar) diff --git a/tests/testthat/test-sensSpec.R b/tests/testthat/test-sensSpec.R index cc88929..ffbd1a7 100644 --- a/tests/testthat/test-sensSpec.R +++ b/tests/testthat/test-sensSpec.R @@ -20,19 +20,17 @@ test_that("sensSpec can order scores", { }) test_that("sensSpec works", { + res2 <- sensSpec(outcome = outcome, score = rscore, rev.order = T, plot.raw = F) - expect_snapshot(as.data.frame(res2$data)) - res3 <- sensSpec(outcome = outcome, score = rscore, rev.order = F, plot.raw = F) - expect_snapshot(as.data.frame(res3$data)) - res4 <- sensSpec(outcome = outcome, score = rscore, rev.order = F, plot.raw = T) - expect_snapshot(as.data.frame(res4$data)) - res5 <- sensSpec(outcome = outcome, score = rscore, rev.order = T, plot.raw = T) - expect_snapshot(as.data.frame(res5$data)) - res6 <- sensSpec(outcome = outcome, score = rscore, methods = c("asis", "gam", "pava")) + + expect_snapshot(as.data.frame(res2$data)) + expect_snapshot(as.data.frame(res3$data)) + expect_snapshot(as.data.frame(res4$data)) + expect_snapshot(as.data.frame(res5$data)) 
   expect_snapshot(as.data.frame(res6$data))
 
   # expect_snapshot_file(save_png(res2$plot), "sensSpec2.png")
diff --git a/tests/testthat/test-utils.R b/tests/testthat/test-utils.R
index d5c295f..9e7c958 100644
--- a/tests/testthat/test-utils.R
+++ b/tests/testthat/test-utils.R
@@ -4,34 +4,37 @@ rscore <- rnorm(40, 0.5, 0.5)
 
 test_that("inputCheck works", {
-  res <- inputCheck(outcome, rscore)
-
-  expect_equal(length(res$outcome), length(res$score))
-  expect_snapshot(res)
-  expect_equal(is.numeric(res$outcome), TRUE)
-  expect_equal(is.numeric(res$score), TRUE)
 
-  expect_error(inputCheck(outcome = c(1, 0), score = c(0.1, 0.2)), "observations")
+  expect_error(inputCheck(c(1, 0, 1), c("a", "b", "c")), "numeric")
+  expect_error(inputCheck(c("a", "b"), c(0.1, 0.2)), "numeric")
+  expect_error(inputCheck(c(0, 1, 0), c(0.1, 0.2)), "same lengths")
+  expect_error(inputCheck(c(1, 0), c(0.1, 0.2)), "observations")
 
   res <- suppressWarnings(inputCheck(c(1, 0, 1, NA, 0), c(0.1, NA, 0.2, 0.3, 0.4)))
   expect_equal(res, list(outcome = c(1, 1, 0), score = c(0.1, 0.2, 0.4)))
 
   res <- suppressWarnings(inputCheck(c(1, 0, 1, NA, 0), c(0.1, NA, 0.2, 0.3, 0.4)))
   expect_equal(res, list(outcome = c(1, 1, 0), score = c(0.1, 0.2, 0.4)))
-
-  expect_equal(
-    inputCheck(rep(c(TRUE, FALSE), 5), 1:10 / 10),
-    list(outcome = as.numeric(rep(c(TRUE, FALSE), 5)), score = 1:10 / 10)
-  )
-
+
   expect_error(inputCheck(c(1, 0, 2), c(0.1, 0.2, 0.3)), "as binary")
-  expect_error(inputCheck(c(1, 0, 1), c("a", "b", "c")), "numeric")
-  expect_error(inputCheck(c("a", "b"), c(0.1, 0.2)), "numeric")
-  expect_error(inputCheck(c(0, 1, 0), c(0.1, 0.2)), "same lengths")
+
   expect_warning(
     inputCheck(c(1, rep(0, 49)), 1:50),
     "There is a low frequency"
   )
 
+  res <- inputCheck(outcome, rscore)
+
+  expect_equal(length(res$outcome), length(res$score))
+  expect_snapshot(res)
+  expect_equal(is.numeric(res$outcome), TRUE)
+  expect_equal(is.numeric(res$score), TRUE)
+
+  expect_equal(
+    inputCheck(rep(c(TRUE, FALSE), 5), 1:10 / 10),
+    list(outcome = as.numeric(rep(c(TRUE, FALSE), 5)), score = 1:10 / 10)
+  )
+
   rscore[1] <- NA
   expect_warning(inputCheck(outcome, rscore))
 })
@@ -78,6 +81,8 @@ test_that("methods returns appropriate length list of lists", {
     methodCheck(list(m1 = list(method = NULL))),
     "specifying one of the predefined estimation functions"
   )
+
+  expect_error(methodCheck(c("gamm")), "not yet available")
 })
 
 
@@ -138,6 +143,10 @@ test_that("add0thPercPV works", {
 })
 
-# test_that("save_png works", {
-#   expect_type(save_png(plot(mtcars$hp ~ mtcars$mpg)), "character")
-# })
+test_that("listMethodCheck works", {
+
+  expect_error(listMethodCheck(list(method = "my_method")), "not yet available")
+
+  expect_error(listMethodCheck(function(x) {x}), "exactly two arguments")
+
+})
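
A note on the `# nocov` markers introduced in R/utils.R above: covr excludes everything between `# nocov start` and `# nocov end` from coverage reporting, which is why the patch keeps the graphics helper but fences it off while the `expect_snapshot_file()` calls in the tests remain commented out. A minimal sketch of how the helper is meant to be exercised, assembled from the roxygen example in that hunk (the plot object `p` here is hypothetical):

library(ggplot2)
library(testthat)

# Render a plot to a temporary PNG and return the path, so the file can be
# compared against a stored snapshot; excluded from coverage via # nocov.
save_png <- function(code, width = 400, height = 400) { # nocov start
  path <- tempfile(fileext = ".png")
  png(path, width = width, height = height)
  on.exit(dev.off())
  print(code)
  return(path)
} # nocov end

p <- ggplot(mtcars) + geom_point(aes(hp, mpg))
expect_snapshot_file(save_png(p), "riskProfile.png")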