From d867ebc3fd089d099a9bf0b0d2254fafb3ad3284 Mon Sep 17 00:00:00 2001 From: Michael Dumelle Date: Mon, 30 Oct 2023 11:08:12 -0700 Subject: [PATCH 01/13] initial contrast analysis commit --- R/contrast_analysis.R | 125 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 125 insertions(+) create mode 100644 R/contrast_analysis.R diff --git a/R/contrast_analysis.R b/R/contrast_analysis.R new file mode 100644 index 0000000..de474d0 --- /dev/null +++ b/R/contrast_analysis.R @@ -0,0 +1,125 @@ +contrast_analysis <- function(dframe, vars, siteID = NULL, weight = "weight", xcoord, ycoord, vartype = "Local", + conf = 95, statistics = c("Mean", "Total")) { + + + + if (is.null(siteID)) { + siteID <- "siteID" + dframe$siteID <- paste("site", seq_len(nrow(dframe)), sep = "-") + } + + # create design object + stratum_ind <- FALSE + stratumID <- NULL + cluster_ind <- FALSE + clusterID <- NULL + weight1 <- NULL + sizeweight <- FALSE + sweight <- NULL + sweight1 <- NULL + fpcfactor_ind <- FALSE + fpcsize <- NULL + Ncluster <- NULL + stage1size <- NULL + jointprob <- NULL + + design <- survey_design( + dframe, siteID, weight, stratum_ind, stratumID, cluster_ind, clusterID, + weight1, sizeweight, sweight, sweight1, fpcfactor_ind, fpcsize, Ncluster, + stage1size, vartype, jointprob + ) + + all_vars <- all.vars(vars) + # incorporate form when all_vars has only length one (function of single variable)? + # what about three variables? Function to assign matrix to localmean_cov? + vars_form <- paste(all_vars, collapse = " + ") + + if (vartype == "Local") { + local_weights <- localmean_weight(x = dframe[[xcoord]], y = dframe[[ycoord]], prb = 1 / dframe[[weight]]) + } + + if ("Mean" %in% statistics) { + rslt <- svymean(make.formula(vars_form), design) + rslt_con <- svycontrast(rslt, vars) + if (vartype == "Local") { + tw <- sum(dframe[[weight]]) + local_vars <- do.call(cbind, lapply(all_vars, function(x) get_local_vars(dframe, weight, x, rslt, "Mean"))) + cov_mx <- localmean_cov(local_vars, local_weights) / tw^2 + derivs <- lapply(all_vars, D, expr = vars) + grad <- rbind(get_grad(all_vars, derivs, rslt)) + se_val <- sqrt(grad %*% cov_mx %*% t(grad)) + } + + if (vartype == "SRS") { + se_val <- SE(rslt_con) + } + + mean_df <- data.frame(Estimate = rslt_con[1], StdError = se_val, LCB95 = rslt_con[1] - 1.96 * se_val, UCB95 = rslt_con[1] + 1.96 * se_val) + } + + + if ("Total" %in% statistics) { + rslt <- svytotal(make.formula(vars_form), design) + rslt_con <- svycontrast(rslt, vars) + if (vartype == "Local") { + tw <- sum(dframe[[weight]]) + local_vars <- do.call(cbind, lapply(all_vars, function(x) get_local_vars(dframe, weight, x, rslt, "Total"))) + cov_mx <- localmean_cov(local_vars, local_weights) + derivs <- lapply(all_vars, D, expr = vars) + grad <- rbind(get_grad(all_vars, derivs, rslt)) + se_val <- sqrt(grad %*% cov_mx %*% t(grad)) + } + + if (vartype == "SRS") { + se_val <- SE(rslt_con) + } + + tot_df <- data.frame(Estimate = rslt_con[1], StdError = se_val, LCB95 = rslt_con[1] - 1.96 * se_val, UCB95 = rslt_con[1] + 1.96 * se_val) + + } + + contr_out <- list() + + if ("Mean" %in% statistics) { + contr_out$Mean <- mean_df + } else { + contr_out$Mean <- NULL + } + + if ("Total" %in% statistics) { + contr_out$Total <- tot_df + } else { + contr_out$Total <- NULL + } + + contr_out + +} + +get_grad <- function(all_vars, derivs, svyout) { + + # hoping for no name conflict with .use suffix + grad.use.spsurvey <- rep(0, length(all_vars)) + derivs.use.spsurvey <- derivs + svyout.use.spsurvey <- svyout + + for (x in 
all_vars) { + assign(x, svyout.use.spsurvey[[x]]) + } + + for (i in seq_along(grad.use.spsurvey)) { + grad.use.spsurvey[i] <- eval(derivs.use.spsurvey[[i]]) + } + grad.use.spsurvey +} + +get_local_vars <- function(dframe, weight, var, rslt, statistic) { + if (statistic == "Mean") { + val <- (dframe[[var]] - rslt[[var]]) * dframe[[weight]] + } + + if (statistic == "Total") { + val <- dframe[[var]] * dframe[[weight]] + } + val +} From 7ff3575052580a654d5e5e32c49c99eea926599a Mon Sep 17 00:00:00 2001 From: Michael Dumelle Date: Mon, 30 Oct 2023 13:11:06 -0700 Subject: [PATCH 02/13] changed DESCRIPTION started NEWS for 5.6.0 --- DESCRIPTION | 2 +- NEWS.md | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/DESCRIPTION b/DESCRIPTION index 1637952..b695fec 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,6 +1,6 @@ Package: spsurvey Title: Spatial Sampling Design and Analysis -Version: 5.5.0 +Version: 5.6.0 Authors@R: c( person("Michael", "Dumelle", role=c("aut","cre"), email = "Dumelle.Michael@epa.gov", comment = c(ORCID = "0000-0002-3393-5529")), diff --git a/NEWS.md b/NEWS.md index 106db2e..abbe8f2 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,3 +1,11 @@ +# spsurvey 5.6.0 + +* Added a `contrast_analysis()` function to estimate linear and nonlinear functions of estimates. + +## Minor Updates + +## Bug Fixes + # spsurvey 5.5.0 ## Minor Updates From e7c44659f6896268a196615c2a071ecc563632b3 Mon Sep 17 00:00:00 2001 From: Michael Dumelle Date: Mon, 4 Dec 2023 11:47:41 -0800 Subject: [PATCH 03/13] comment out contrast_analysis() until further notice --- R/contrast_analysis.R | 250 +++++++++++++++++++++--------------------- 1 file changed, 125 insertions(+), 125 deletions(-) diff --git a/R/contrast_analysis.R b/R/contrast_analysis.R index de474d0..e10651d 100644 --- a/R/contrast_analysis.R +++ b/R/contrast_analysis.R @@ -1,125 +1,125 @@ -contrast_analysis <- function(dframe, vars, siteID = NULL, weight = "weight", xcoord, ycoord, vartype = "Local", - conf = 95, statistics = c("Mean", "Total")) { - - - - if (is.null(siteID)) { - siteID <- "siteID" - dframe$siteID <- paste("site", seq_len(nrow(dframe)), sep = "-") - } - - # create design object - stratum_ind <- FALSE - stratumID <- NULL - cluster_ind <- FALSE - clusterID <- NULL - weight1 <- NULL - sizeweight <- FALSE - sweight <- NULL - sweight1 <- NULL - fpcfactor_ind <- FALSE - fpcsize <- NULL - Ncluster <- NULL - stage1size <- NULL - jointprob <- NULL - - design <- survey_design( - dframe, siteID, weight, stratum_ind, stratumID, cluster_ind, clusterID, - weight1, sizeweight, sweight, sweight1, fpcfactor_ind, fpcsize, Ncluster, - stage1size, vartype, jointprob - ) - - all_vars <- all.vars(vars) - # incorporate form when all_vars has only length one (function of single variable)? - # what about three variables? Function to assign matrix to localmean_cov? 
- vars_form <- paste(all_vars, collapse = " + ") - - if (vartype == "Local") { - local_weights <- localmean_weight(x = dframe[[xcoord]], y = dframe[[ycoord]], prb = 1 / dframe[[weight]]) - } - - if ("Mean" %in% statistics) { - rslt <- svymean(make.formula(vars_form), design) - rslt_con <- svycontrast(rslt, vars) - if (vartype == "Local") { - tw <- sum(dframe[[weight]]) - local_vars <- do.call(cbind, lapply(all_vars, function(x) get_local_vars(dframe, weight, x, rslt, "Mean"))) - cov_mx <- localmean_cov(local_vars, local_weights) / tw^2 - derivs <- lapply(all_vars, D, expr = vars) - grad <- rbind(get_grad(all_vars, derivs, rslt)) - se_val <- sqrt(grad %*% cov_mx %*% t(grad)) - } - - if (vartype == "SRS") { - se_val <- SE(rslt_con) - } - - mean_df <- data.frame(Estimate = rslt_con[1], StdError = se_val, LCB95 = rslt_con[1] - 1.96 * se_val, UCB95 = rslt_con[1] + 1.96 * se_val) - } - - - if ("Total" %in% statistics) { - rslt <- svytotal(make.formula(vars_form), design) - rslt_con <- svycontrast(rslt, vars) - if (vartype == "Local") { - tw <- sum(dframe[[weight]]) - local_vars <- do.call(cbind, lapply(all_vars, function(x) get_local_vars(dframe, weight, x, rslt, "Total"))) - cov_mx <- localmean_cov(local_vars, local_weights) - derivs <- lapply(all_vars, D, expr = vars) - grad <- rbind(get_grad(all_vars, derivs, rslt)) - se_val <- sqrt(grad %*% cov_mx %*% t(grad)) - } - - if (vartype == "SRS") { - se_val <- SE(rslt_con) - } - - tot_df <- data.frame(Estimate = rslt_con[1], StdError = se_val, LCB95 = rslt_con[1] - 1.96 * se_val, UCB95 = rslt_con[1] + 1.96 * se_val) - - } - - contr_out <- list() - - if ("Mean" %in% statistics) { - contr_out$Mean <- mean_df - } else { - contr_out$Mean <- NULL - } - - if ("Total" %in% statistics) { - contr_out$Total <- tot_df - } else { - contr_out$Total <- NULL - } - - contr_out - -} - -get_grad <- function(all_vars, derivs, svyout) { - - # hoping for no name conflict with .use suffix - grad.use.spsurvey <- rep(0, length(all_vars)) - derivs.use.spsurvey <- derivs - svyout.use.spsurvey <- svyout - - for (x in all_vars) { - assign(x, svyout.use.spsurvey[[x]]) - } - - for (i in seq_along(grad.use.spsurvey)) { - grad.use.spsurvey[i] <- eval(derivs.use.spsurvey[[i]]) - } - grad.use.spsurvey -} - -get_local_vars <- function(dframe, weight, var, rslt, statistic) { - if (statistic == "Mean") { - val <- (dframe[[var]] - rslt[[var]]) * dframe[[weight]] - } - - if (statistic == "Total") { - val <- dframe[[var]] * dframe[[weight]] - } - val -} +# contrast_analysis <- function(dframe, vars, siteID = NULL, weight = "weight", xcoord, ycoord, vartype = "Local", +# conf = 95, statistics = c("Mean", "Total")) { +# +# +# +# if (is.null(siteID)) { +# siteID <- "siteID" +# dframe$siteID <- paste("site", seq_len(nrow(dframe)), sep = "-") +# } +# +# # create design object +# stratum_ind <- FALSE +# stratumID <- NULL +# cluster_ind <- FALSE +# clusterID <- NULL +# weight1 <- NULL +# sizeweight <- FALSE +# sweight <- NULL +# sweight1 <- NULL +# fpcfactor_ind <- FALSE +# fpcsize <- NULL +# Ncluster <- NULL +# stage1size <- NULL +# jointprob <- NULL +# +# design <- survey_design( +# dframe, siteID, weight, stratum_ind, stratumID, cluster_ind, clusterID, +# weight1, sizeweight, sweight, sweight1, fpcfactor_ind, fpcsize, Ncluster, +# stage1size, vartype, jointprob +# ) +# +# all_vars <- all.vars(vars) +# # incorporate form when all_vars has only length one (function of single variable)? +# # what about three variables? Function to assign matrix to localmean_cov? 
+# vars_form <- paste(all_vars, collapse = " + ") +# +# if (vartype == "Local") { +# local_weights <- localmean_weight(x = dframe[[xcoord]], y = dframe[[ycoord]], prb = 1 / dframe[[weight]]) +# } +# +# if ("Mean" %in% statistics) { +# rslt <- svymean(make.formula(vars_form), design) +# rslt_con <- svycontrast(rslt, vars) +# if (vartype == "Local") { +# tw <- sum(dframe[[weight]]) +# local_vars <- do.call(cbind, lapply(all_vars, function(x) get_local_vars(dframe, weight, x, rslt, "Mean"))) +# cov_mx <- localmean_cov(local_vars, local_weights) / tw^2 +# derivs <- lapply(all_vars, D, expr = vars) +# grad <- rbind(get_grad(all_vars, derivs, rslt)) +# se_val <- sqrt(grad %*% cov_mx %*% t(grad)) +# } +# +# if (vartype == "SRS") { +# se_val <- SE(rslt_con) +# } +# +# mean_df <- data.frame(Estimate = rslt_con[1], StdError = se_val, LCB95 = rslt_con[1] - 1.96 * se_val, UCB95 = rslt_con[1] + 1.96 * se_val) +# } +# +# +# if ("Total" %in% statistics) { +# rslt <- svytotal(make.formula(vars_form), design) +# rslt_con <- svycontrast(rslt, vars) +# if (vartype == "Local") { +# tw <- sum(dframe[[weight]]) +# local_vars <- do.call(cbind, lapply(all_vars, function(x) get_local_vars(dframe, weight, x, rslt, "Total"))) +# cov_mx <- localmean_cov(local_vars, local_weights) +# derivs <- lapply(all_vars, D, expr = vars) +# grad <- rbind(get_grad(all_vars, derivs, rslt)) +# se_val <- sqrt(grad %*% cov_mx %*% t(grad)) +# } +# +# if (vartype == "SRS") { +# se_val <- SE(rslt_con) +# } +# +# tot_df <- data.frame(Estimate = rslt_con[1], StdError = se_val, LCB95 = rslt_con[1] - 1.96 * se_val, UCB95 = rslt_con[1] + 1.96 * se_val) +# +# } +# +# contr_out <- list() +# +# if ("Mean" %in% statistics) { +# contr_out$Mean <- mean_df +# } else { +# contr_out$Mean <- NULL +# } +# +# if ("Total" %in% statistics) { +# contr_out$Total <- tot_df +# } else { +# contr_out$Total <- NULL +# } +# +# contr_out +# +# } +# +# get_grad <- function(all_vars, derivs, svyout) { +# +# # hoping for no name conflict with .use suffix +# grad.use.spsurvey <- rep(0, length(all_vars)) +# derivs.use.spsurvey <- derivs +# svyout.use.spsurvey <- svyout +# +# for (x in all_vars) { +# assign(x, svyout.use.spsurvey[[x]]) +# } +# +# for (i in seq_along(grad.use.spsurvey)) { +# grad.use.spsurvey[i] <- eval(derivs.use.spsurvey[[i]]) +# } +# grad.use.spsurvey +# } +# +# get_local_vars <- function(dframe, weight, var, rslt, statistic) { +# if (statistic == "Mean") { +# val <- (dframe[[var]] - rslt[[var]]) * dframe[[weight]] +# } +# +# if (statistic == "Total") { +# val <- dframe[[var]] * dframe[[weight]] +# } +# val +# } From 8546c2dbba0992d8c9630cfa0a87ed59bc4a3545 Mon Sep 17 00:00:00 2001 From: Michael Dumelle Date: Mon, 4 Dec 2023 12:40:19 -0800 Subject: [PATCH 04/13] fix revisit_dsgn bug that did not print all panels --- DESCRIPTION | 4 ++-- NEWS.md | 10 +++++++++- R/revisit_dsgn.R | 4 ++-- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index 1637952..ce798cc 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,6 +1,6 @@ Package: spsurvey Title: Spatial Sampling Design and Analysis -Version: 5.5.0 +Version: 5.6.0 Authors@R: c( person("Michael", "Dumelle", role=c("aut","cre"), email = "Dumelle.Michael@epa.gov", comment = c(ORCID = "0000-0002-3393-5529")), @@ -40,5 +40,5 @@ BugReports: https://github.com/USEPA/spsurvey/issues VignetteBuilder: knitr Encoding: UTF-8 LazyData: true -RoxygenNote: 7.1.2 +RoxygenNote: 7.2.3 NeedsCompilation: no diff --git a/NEWS.md b/NEWS.md index 106db2e..f39d080 100644 --- a/NEWS.md +++ 
b/NEWS.md @@ -1,3 +1,11 @@ +# spsurvey 5.6.0 + +## Minor Updates + +## Bug Fixes + +* Fixed a bug in `revisit_dsgn` that prevented proper printing of panels when there were multiple panels. + # spsurvey 5.5.0 ## Minor Updates @@ -14,7 +22,7 @@ ## Bug Fixes -* Fixed a bug that caused an erorr in `grts()` and `irs()` occurred when at least +* Fixed a bug that caused an error in `grts()` and `irs()` occurred when at least one variable name in `sframe` was named `"siteID"`, `"siteuse"`, `"replsite"`, `"lon_WGS84"`, `"lat_WGS84"`, `"stratum"`, `"wgt"`, `"ip"`, `"caty"`, `"aux"`, `xcoord`, `ycoord`, or `idpts` and the name of the geometry column in `sframe` diff --git a/R/revisit_dsgn.R b/R/revisit_dsgn.R index 7f50636..318476b 100644 --- a/R/revisit_dsgn.R +++ b/R/revisit_dsgn.R @@ -375,6 +375,6 @@ revisit_dsgn <- function(n_period, panels, begin = 1, skip = 1) { } # return final revisit panel design structure - class(pan_dsgn) <- "paneldesign" - return(pan_dsgn) + class(panels_dsgn) <- "paneldesign" + return(panels_dsgn) } From 5a76d216a616ed24a6eb7632221ab27fd182e904 Mon Sep 17 00:00:00 2001 From: Michael Dumelle Date: Mon, 4 Dec 2023 12:44:29 -0800 Subject: [PATCH 05/13] update NEWS merge --- NEWS.md | 8 -------- 1 file changed, 8 deletions(-) diff --git a/NEWS.md b/NEWS.md index b37d5ea..f39d080 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,19 +1,11 @@ # spsurvey 5.6.0 -<<<<<<< HEAD -======= -* Added a `contrast_analysis()` function to estimate linear and nonlinear functions of estimates. - ->>>>>>> 7ff3575052580a654d5e5e32c49c99eea926599a ## Minor Updates ## Bug Fixes -<<<<<<< HEAD * Fixed a bug in `revisit_dsgn` that prevented proper printing of panels when there were multiple panels. -======= ->>>>>>> 7ff3575052580a654d5e5e32c49c99eea926599a # spsurvey 5.5.0 ## Minor Updates From e55c09236df3276d8fd5067944930c46e8cbb6e1 Mon Sep 17 00:00:00 2001 From: Olsen Date: Tue, 19 Dec 2023 08:51:39 -0800 Subject: [PATCH 06/13] test --- R/revisit_dsgn.R | 1 + 1 file changed, 1 insertion(+) diff --git a/R/revisit_dsgn.R b/R/revisit_dsgn.R index 318476b..efebf82 100644 --- a/R/revisit_dsgn.R +++ b/R/revisit_dsgn.R @@ -377,4 +377,5 @@ revisit_dsgn <- function(n_period, panels, begin = 1, skip = 1) { # return final revisit panel design structure class(panels_dsgn) <- "paneldesign" return(panels_dsgn) + } From df97cad9d54878496da19cf881b4ced909ce881c Mon Sep 17 00:00:00 2001 From: Olsen Date: Tue, 19 Dec 2023 14:24:27 -0800 Subject: [PATCH 07/13] test --- R/revisit_dsgn.R | 1 - 1 file changed, 1 deletion(-) diff --git a/R/revisit_dsgn.R b/R/revisit_dsgn.R index efebf82..318476b 100644 --- a/R/revisit_dsgn.R +++ b/R/revisit_dsgn.R @@ -377,5 +377,4 @@ revisit_dsgn <- function(n_period, panels, begin = 1, skip = 1) { # return final revisit panel design structure class(panels_dsgn) <- "paneldesign" return(panels_dsgn) - } From 2d33a2216314cec5c550c89a3fde8d23903a96af Mon Sep 17 00:00:00 2001 From: Olsen Date: Wed, 20 Dec 2023 08:20:10 -0800 Subject: [PATCH 08/13] test --- R/revisit_dsgn.R | 1 + 1 file changed, 1 insertion(+) diff --git a/R/revisit_dsgn.R b/R/revisit_dsgn.R index 318476b..efebf82 100644 --- a/R/revisit_dsgn.R +++ b/R/revisit_dsgn.R @@ -377,4 +377,5 @@ revisit_dsgn <- function(n_period, panels, begin = 1, skip = 1) { # return final revisit panel design structure class(panels_dsgn) <- "paneldesign" return(panels_dsgn) + } From 7ef5f5f91289d785fd98cb930d1fc74d5b1b477c Mon Sep 17 00:00:00 2001 From: Michael Dumelle Date: Thu, 21 Dec 2023 09:06:21 -0800 Subject: [PATCH 09/13] fixed bug in grts() and 
irs() with empty geometries --- NEWS.md | 3 ++- R/grts_stratum.R | 5 +++-- R/irs_stratum.R | 5 +++-- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/NEWS.md b/NEWS.md index f39d080..28511e8 100644 --- a/NEWS.md +++ b/NEWS.md @@ -4,7 +4,8 @@ ## Bug Fixes -* Fixed a bug in `revisit_dsgn` that prevented proper printing of panels when there were multiple panels. +* Fixed a bug in `revisit_dsgn()` that prevented proper printing of panels when there were multiple panels. +* Fixed a bug that prevented `grts()` and `irs()` from working properly with empty `LINESTRING` or `POLYGON` geometries. # spsurvey 5.5.0 diff --git a/R/grts_stratum.R b/R/grts_stratum.R index c873346..f6bac95 100644 --- a/R/grts_stratum.R +++ b/R/grts_stratum.R @@ -116,7 +116,7 @@ grts_stratum <- function(stratum, dsgn, sframe, sf_type, wgt_units = NULL, pt_de n_size <- as.integer(ceiling(pmin(1e9, pt_density * (n_base + n_over)))) sfpts <- st_sample(sftmp, size = n_size, type = "regular", exact = TRUE) sfpts <- st_as_sf(as.data.frame(sfpts), crs = st_crs(sftmp)) - sfpts <- st_cast(sfpts, to = "POINT") + # sfpts <- st_cast(sfpts, to = "POINT") # drop features with no points sfpts <- sfpts[!st_is_empty(sfpts), ] # join sites with linear features @@ -145,9 +145,10 @@ grts_stratum <- function(stratum, dsgn, sframe, sf_type, wgt_units = NULL, pt_de n_size <- as.integer(ceiling(pmin(1e9, pt_density * (n_base + n_over)))) sfpts <- st_sample(sftmp, size = n_size, type = "hexagonal", exact = TRUE) sfpts <- st_as_sf(as.data.frame(sfpts), crs = st_crs(sftmp)) - sfpts <- st_cast(sfpts, to = "POINT") + # sfpts <- st_cast(sfpts, to = "POINT") # drop features with no points sfpts <- sfpts[!st_is_empty(sfpts), ] + sfpts <- st_cast(sfpts, to = "POINT") sftmp <- st_join(sfpts, sftmp) sftmp$xcoord <- st_coordinates(sftmp)[, "X"] sftmp$ycoord <- st_coordinates(sftmp)[, "Y"] diff --git a/R/irs_stratum.R b/R/irs_stratum.R index a48c920..1def138 100644 --- a/R/irs_stratum.R +++ b/R/irs_stratum.R @@ -86,7 +86,7 @@ irs_stratum <- function(stratum, dsgn, sframe, sf_type, wgt_units = NULL, pt_den n_size <- as.integer(ceiling(pmin(1e9, pt_density * (n_base + n_over)))) sfpts <- st_sample(sftmp, size = n_size, type = "regular", exact = TRUE) sfpts <- st_as_sf(as.data.frame(sfpts), crs = st_crs(sftmp)) - sfpts <- st_cast(sfpts, to = "POINT") + # sfpts <- st_cast(sfpts, to = "POINT") # drop features with no points sfpts <- sfpts[!st_is_empty(sfpts), ] # join sites with linear features @@ -115,9 +115,10 @@ irs_stratum <- function(stratum, dsgn, sframe, sf_type, wgt_units = NULL, pt_den n_size <- as.integer(ceiling(pmin(1e9, pt_density * (n_base + n_over)))) sfpts <- st_sample(sftmp, size = n_size, type = "hexagonal", exact = TRUE) sfpts <- st_as_sf(as.data.frame(sfpts), crs = st_crs(sftmp)) - sfpts <- st_cast(sfpts, to = "POINT") + # sfpts <- st_cast(sfpts, to = "POINT") # drop features with no points sfpts <- sfpts[!st_is_empty(sfpts), ] + sfpts <- st_cast(sfpts, to = "POINT") sftmp <- st_join(sfpts, sftmp) sftmp$xcoord <- st_coordinates(sftmp)[, "X"] sftmp$ycoord <- st_coordinates(sftmp)[, "Y"] From eebad4119d50a0526198c736778f942065922d42 Mon Sep 17 00:00:00 2001 From: Michael Dumelle Date: Thu, 28 Dec 2023 14:01:21 -0800 Subject: [PATCH 10/13] Fix bug in grts() and irs() from returning coordinates in output when geometry column name different from geometry and legacy_sites was specified #40. 
--- NEWS.md | 5 ++--- R/grts.R | 39 ++++++++++++++------------------------- R/irs.R | 36 ++++++++++++------------------------ man/grts.Rd | 2 +- man/irs.Rd | 2 +- 5 files changed, 30 insertions(+), 54 deletions(-) diff --git a/NEWS.md b/NEWS.md index 28511e8..e422e7f 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,11 +1,10 @@ -# spsurvey 5.6.0 - -## Minor Updates +# spsurvey 5.5.1 ## Bug Fixes * Fixed a bug in `revisit_dsgn()` that prevented proper printing of panels when there were multiple panels. * Fixed a bug that prevented `grts()` and `irs()` from working properly with empty `LINESTRING` or `POLYGON` geometries. +* Fixed a bug that prevented `grts()` and `irs()` from returning coordinates when the the `geometry` column of `sframe` was not `"geometry"` and `legacy_sites` was specified ([#40](https://github.com/USEPA/spsurvey/issues/40)). # spsurvey 5.5.0 diff --git a/R/grts.R b/R/grts.R index 6c4eb8c..b4b507f 100644 --- a/R/grts.R +++ b/R/grts.R @@ -77,7 +77,7 @@ #' geometry representing the legacy sites. spsurvey assumes that #' the legacy sites were selected from a previous sampling design that #' incorporated randomness into site selection and that the legacy sites -#' are elements of the current sampling frame. If \code{sframe} has a +#' are elements of the current sampling frame. If \code{sframe} has a #' \code{POINT} or \code{MULTIPOINT} geometry, the observations in \code{legacy_sites} #' should not also be in \code{sframe} (i.e., duplicates are not removed). Thus, \code{sframe} #' and \code{legacy_sites} together compose the current sampling frame. If m or z values @@ -153,8 +153,8 @@ #' If \code{n_over} is an unnamed, length-one vector, it's value is recycled #' and used for each stratum. Note that if the #' sampling design has unequal selection probabilities (\code{seltype = "unequal"}), then \code{n_over} sites -#' are given the same proportion of \code{caty_n} values as \code{n_base}. -#' +#' are given the same proportion of \code{caty_n} values as \code{n_base}. +#' #' #' @param n_near The number of nearest neighbor (nn) replacement sites. 
#' If the sampling design is unstratified, \code{n_near} is integer from \code{1} @@ -340,6 +340,7 @@ grts <- function(sframe, n_base, stratum_var = NULL, seltype = NULL, caty_var = legacy_caty_var = NULL, legacy_aux_var = NULL, mindis = NULL, maxtry = 10, n_over = NULL, n_near = NULL, wgt_units = NULL, pt_density = NULL, DesignID = "Site", SiteBegin = 1, sep = "-", projcrs_check = TRUE) { + if (inherits(sframe, c("tbl_df", "tbl"))) { # identify if tibble class elements are present class(sframe) <- setdiff(class(sframe), c("tbl_df", "tbl")) # remove tibble class for rownames warning @@ -350,18 +351,17 @@ grts <- function(sframe, n_base, stratum_var = NULL, seltype = NULL, caty_var = # remove tibble class for rownames warning } + geom_col_name <- attr(sframe, "sf_column") + st_geometry(sframe) <- "geometry" if (!is.null(legacy_sites)) { - sframe_geom_name <- attr(sframe, "sf_column") - legacy_geom_name <- attr(legacy_sites, "sf_column") - names(legacy_sites)[names(legacy_sites) == legacy_geom_name] <- sframe_geom_name - st_geometry(legacy_sites) <- sframe_geom_name + st_geometry(legacy_sites) <- "geometry" } - + # save initial variable specifications for the design list later initial_stratum_var <- stratum_var initial_caty_var <- caty_var initial_aux_var <- aux_var - + # Create warning indicator and data frame to collect all potential issues during # sample selection warn_ind <- FALSE @@ -436,14 +436,6 @@ grts <- function(sframe, n_base, stratum_var = NULL, seltype = NULL, caty_var = legacy_sites_names <- names(legacy_sites) } - # Find geometry column name - geom_col_name <- attr(sframe, "sf_column") - if (geom_col_name != "geometry") { - # Force to geometry for other sf consistency - names(sframe)[names(sframe) == geom_col_name] <- "geometry" - st_geometry(sframe) <- "geometry" - } - ## Create variables in sampling frame if needed. 
# Create unique sampling frame ID values sframe$id <- 1:nrow(sframe) @@ -495,7 +487,7 @@ grts <- function(sframe, n_base, stratum_var = NULL, seltype = NULL, caty_var = legacy_sites$legacy <- legacy_sites[[legacy_var]] } } - + # save initial variable specifications for the design list later initial_legacy_stratum_var <- legacy_stratum_var initial_legacy_caty_var <- legacy_caty_var @@ -764,29 +756,26 @@ grts <- function(sframe, n_base, stratum_var = NULL, seltype = NULL, caty_var = if (geom_col_name != "geometry") { # sframe prefix if necessary if (geom_col_name %in% dsgn_names_extra) { - new_geom_col_name <- paste("sframe", geom_col_name, sep = "_") - sframe_names[sframe_names == geom_col_name] <- new_geom_col_name - geom_col_name <- new_geom_col_name + geom_col_name <- paste("sframe", geom_col_name, sep = "_") } # restore original column names if (!is.null(sites_legacy)) { - names(sites_legacy)[names(sites_legacy) == "geometry"] <- geom_col_name st_geometry(sites_legacy) <- geom_col_name + legacy_sites_names[legacy_sites_names == "geometry"] <- geom_col_name + st_geometry(legacy_sites) <- geom_col_name } if (!is.null(sites_base)) { - names(sites_base)[names(sites_base) == "geometry"] <- geom_col_name + sframe_names[sframe_names == "geometry"] <- geom_col_name st_geometry(sites_base) <- geom_col_name } if (!is.null(sites_over)) { - names(sites_over)[names(sites_over) == "geometry"] <- geom_col_name st_geometry(sites_over) <- geom_col_name } if (!is.null(sites_near)) { - names(sites_near)[names(sites_near) == "geometry"] <- geom_col_name st_geometry(sites_near) <- geom_col_name } } diff --git a/R/irs.R b/R/irs.R index e4d7474..520d48f 100644 --- a/R/irs.R +++ b/R/irs.R @@ -38,6 +38,7 @@ irs <- function(sframe, n_base, stratum_var = NULL, seltype = NULL, caty_var = N legacy_caty_var = NULL, legacy_aux_var = NULL, mindis = NULL, maxtry = 10, n_over = NULL, n_near = NULL, wgt_units = NULL, pt_density = NULL, DesignID = "Site", SiteBegin = 1, sep = "-", projcrs_check = TRUE) { + if (inherits(sframe, c("tbl_df", "tbl"))) { # identify if tibble class elements are present class(sframe) <- setdiff(class(sframe), c("tbl_df", "tbl")) # remove tibble class for rownames warning @@ -48,18 +49,17 @@ irs <- function(sframe, n_base, stratum_var = NULL, seltype = NULL, caty_var = N # remove tibble class for rownames warning } + geom_col_name <- attr(sframe, "sf_column") + st_geometry(sframe) <- "geometry" if (!is.null(legacy_sites)) { - sframe_geom_name <- attr(sframe, "sf_column") - legacy_geom_name <- attr(legacy_sites, "sf_column") - names(legacy_sites)[names(legacy_sites) == legacy_geom_name] <- sframe_geom_name - st_geometry(legacy_sites) <- sframe_geom_name + st_geometry(legacy_sites) <- "geometry" } - + # save initial variable specifications for the design list later initial_stratum_var <- stratum_var initial_caty_var <- caty_var initial_aux_var <- aux_var - + # Create warning indicator and data frame to collect all potential issues during # sample selection warn_ind <- FALSE @@ -132,15 +132,6 @@ irs <- function(sframe, n_base, stratum_var = NULL, seltype = NULL, caty_var = N legacy_sites_names <- names(legacy_sites) } - # Find geometry column name - geom_col_name <- attr(sframe, "sf_column") - if (geom_col_name != "geometry") { - # Force to geometry for other sf consistency - names(sframe)[names(sframe) == geom_col_name] <- "geometry" - st_geometry(sframe) <- "geometry" - } - - ## Create variables in sample frame if needed. 
# Create unique sample frame ID values sframe$id <- 1:nrow(sframe) @@ -192,7 +183,7 @@ irs <- function(sframe, n_base, stratum_var = NULL, seltype = NULL, caty_var = N legacy_sites$legacy <- legacy_sites[[legacy_var]] } } - + # save initial variable specifications for the design list later initial_legacy_stratum_var <- legacy_stratum_var initial_legacy_caty_var <- legacy_caty_var @@ -461,29 +452,26 @@ irs <- function(sframe, n_base, stratum_var = NULL, seltype = NULL, caty_var = N if (geom_col_name != "geometry") { # sframe prefix if necessary if (geom_col_name %in% dsgn_names_extra) { - new_geom_col_name <- paste("sframe", geom_col_name, sep = "_") - sframe_names[sframe_names == geom_col_name] <- new_geom_col_name - geom_col_name <- new_geom_col_name + geom_col_name <- paste("sframe", geom_col_name, sep = "_") } # restore original column names if (!is.null(sites_legacy)) { - names(sites_legacy)[names(sites_legacy) == "geometry"] <- geom_col_name st_geometry(sites_legacy) <- geom_col_name + legacy_sites_names[legacy_sites_names == "geometry"] <- geom_col_name + st_geometry(legacy_sites) <- geom_col_name } if (!is.null(sites_base)) { - names(sites_base)[names(sites_base) == "geometry"] <- geom_col_name + sframe_names[sframe_names == "geometry"] <- geom_col_name st_geometry(sites_base) <- geom_col_name } if (!is.null(sites_over)) { - names(sites_over)[names(sites_over) == "geometry"] <- geom_col_name st_geometry(sites_over) <- geom_col_name } if (!is.null(sites_near)) { - names(sites_near)[names(sites_near) == "geometry"] <- geom_col_name st_geometry(sites_near) <- geom_col_name } } @@ -608,7 +596,7 @@ irs <- function(sframe, n_base, stratum_var = NULL, seltype = NULL, caty_var = N seltype = dsgn$seltype, caty_var = initial_caty_var, caty_n = dsgn$caty_n, aux_var = initial_aux_var, legacy = dsgn$legacy_option, mindis = dsgn$mindis, n_over = dsgn$n_over, n_near = dsgn$n_near ) - + if (any(dsgn$legacy)) { dsgn <- c(dsgn, list(legacy_stratum_var = initial_legacy_stratum_var, legacy_caty_var = initial_legacy_caty_var, legacy_aux_var = initial_legacy_aux_var)) diff --git a/man/grts.Rd b/man/grts.Rd index c4367e6..51bd16c 100644 --- a/man/grts.Rd +++ b/man/grts.Rd @@ -106,7 +106,7 @@ and that the legacy sites are elements of the current sampling frame.} geometry representing the legacy sites. spsurvey assumes that the legacy sites were selected from a previous sampling design that incorporated randomness into site selection and that the legacy sites -are elements of the current sampling frame. If \code{sframe} has a +are elements of the current sampling frame. If \code{sframe} has a \code{POINT} or \code{MULTIPOINT} geometry, the observations in \code{legacy_sites} should not also be in \code{sframe} (i.e., duplicates are not removed). Thus, \code{sframe} and \code{legacy_sites} together compose the current sampling frame. If m or z values diff --git a/man/irs.Rd b/man/irs.Rd index 86a32f3..b365ccc 100644 --- a/man/irs.Rd +++ b/man/irs.Rd @@ -106,7 +106,7 @@ and that the legacy sites are elements of the current sampling frame.} geometry representing the legacy sites. spsurvey assumes that the legacy sites were selected from a previous sampling design that incorporated randomness into site selection and that the legacy sites -are elements of the current sampling frame. If \code{sframe} has a +are elements of the current sampling frame. 
If \code{sframe} has a \code{POINT} or \code{MULTIPOINT} geometry, the observations in \code{legacy_sites} should not also be in \code{sframe} (i.e., duplicates are not removed). Thus, \code{sframe} and \code{legacy_sites} together compose the current sampling frame. If m or z values From 8e43f8dbd73de320d8e355fd7cda72e39f48d51e Mon Sep 17 00:00:00 2001 From: Michael Dumelle Date: Thu, 28 Dec 2023 14:01:35 -0800 Subject: [PATCH 11/13] downgrade to 5.5.1 --- DESCRIPTION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DESCRIPTION b/DESCRIPTION index ce798cc..d70204c 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,6 +1,6 @@ Package: spsurvey Title: Spatial Sampling Design and Analysis -Version: 5.6.0 +Version: 5.5.1 Authors@R: c( person("Michael", "Dumelle", role=c("aut","cre"), email = "Dumelle.Michael@epa.gov", comment = c(ORCID = "0000-0002-3393-5529")), From a8d8356868042b926a916d1a5b1812d0fd8a7414 Mon Sep 17 00:00:00 2001 From: Michael Dumelle Date: Thu, 28 Dec 2023 15:04:20 -0800 Subject: [PATCH 12/13] some CRAN updates --- R/power_dsgn.R | 2 +- cran-comments.md | 8 +++++--- man/power_dsgn.Rd | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/R/power_dsgn.R b/R/power_dsgn.R index 34db88d..fc03746 100644 --- a/R/power_dsgn.R +++ b/R/power_dsgn.R @@ -103,7 +103,7 @@ #' @keywords survey #' #' @seealso -#' \itemize{ +#' \describe{ #' \item{\code{\link{ppd_plot}}}{ to plot power curves for #' panel designs} #' } diff --git a/cran-comments.md b/cran-comments.md index 8366eac..43d06c5 100644 --- a/cran-comments.md +++ b/cran-comments.md @@ -1,4 +1,4 @@ -This is a minor update that adds few new features and bug fixes. +This is a minor update that adds a few bug fixes. ------- @@ -8,8 +8,8 @@ This is a resubmission. ## R CMD check results -Here is the output from `devtools::check(manual = TRUE)` on R Version 4.1.0, -devtools version 2.4.2, and Windows 10 x64 operating system +Here is the output from `devtools::check(manual = TRUE)` on R Version 4.2.2, +devtools version 2.4.5, and Windows 10 x64 operating system 0 errors | 1 warnings | 0 notes @@ -21,6 +21,8 @@ only 0.39 MB, well below the 1MB suggested limit for manuals. In short, it is my warning in `devtools::check()` (and `NOTE` in rhub builds) does not accurately reflect the size of the final PDF manual installed upon package build. +I have also checked the build on `win_devel` and received a NOTE about a possibly invalid URL. This URL, as best I can tell, is valid. + ## Downstream dependencies This minor update should not affect any downstream dependencies. diff --git a/man/power_dsgn.Rd b/man/power_dsgn.Rd index fbc08fb..2308753 100644 --- a/man/power_dsgn.Rd +++ b/man/power_dsgn.Rd @@ -134,7 +134,7 @@ Urquhart, N. S., W. S. Overton, et al. (1993) Comparing sampling designs pp. 151-173. 
} \seealso{ -\itemize{ +\describe{ \item{\code{\link{ppd_plot}}}{ to plot power curves for panel designs} } From 3ce9190fb562a5b83e3e06bb29dfa7d93278587d Mon Sep 17 00:00:00 2001 From: Michael Dumelle Date: Tue, 9 Jan 2024 11:26:33 -0800 Subject: [PATCH 13/13] website updates --- docs/404.html | 4 +- docs/LICENSE.html | 41 +- docs/articles/EDA.html | 442 ++++++---- docs/articles/analysis.html | 981 +++++++++++++--------- docs/articles/index.html | 4 +- docs/articles/sampling.html | 691 +++++++++------ docs/articles/start-here.html | 278 ++++-- docs/authors.html | 4 +- docs/index.html | 72 +- docs/news/index.html | 114 ++- docs/pkgdown.yml | 6 +- docs/reference/Illinois_River.html | 6 +- docs/reference/Illinois_River_Legacy.html | 6 +- docs/reference/Lake_Ontario.html | 6 +- docs/reference/NE_Lakes.html | 6 +- docs/reference/NE_Lakes_Legacy.html | 6 +- docs/reference/NE_Lakes_df.html | 6 +- docs/reference/NLA_PNW.html | 6 +- docs/reference/NRSA_EPA7.html | 6 +- docs/reference/adjwgt.html | 34 +- docs/reference/adjwgtNR.html | 22 +- docs/reference/ash1_wgt.html | 28 +- docs/reference/attrisk_analysis.html | 96 +-- docs/reference/cat_analysis.html | 92 +- docs/reference/cdf_plot.html | 114 +-- docs/reference/change_analysis.html | 98 +-- docs/reference/cont_analysis.html | 96 +-- docs/reference/cont_cdfplot.html | 90 +- docs/reference/cont_cdftest.html | 112 +-- docs/reference/cov_panel_dsgn.html | 24 +- docs/reference/diffrisk_analysis.html | 96 +-- docs/reference/errorprnt.html | 6 +- docs/reference/grts.html | 72 +- docs/reference/index.html | 4 +- docs/reference/irs.html | 72 +- docs/reference/localmean_cov.html | 6 +- docs/reference/localmean_var.html | 6 +- docs/reference/localmean_weight.html | 6 +- docs/reference/pd_summary.html | 28 +- docs/reference/plot.html | 80 +- docs/reference/plot.sp_CDF.html | 116 +-- docs/reference/power_dsgn.html | 71 +- docs/reference/ppd_plot.html | 92 +- docs/reference/relrisk_analysis.html | 96 +-- docs/reference/revisit_bibd.html | 30 +- docs/reference/revisit_dsgn.html | 76 +- docs/reference/revisit_rand.html | 48 +- docs/reference/sp_balance.html | 34 +- docs/reference/sp_frame.html | 18 +- docs/reference/sp_plot.html | 86 +- docs/reference/sp_rbind.html | 16 +- docs/reference/sp_summary.html | 32 +- docs/reference/spsurvey-package.html | 4 +- docs/reference/stopprnt.html | 6 +- docs/reference/summary.html | 28 +- docs/reference/trend_analysis.html | 124 +-- docs/reference/warnprnt.html | 6 +- 57 files changed, 2676 insertions(+), 2074 deletions(-) diff --git a/docs/404.html b/docs/404.html index 4ed49bb..44e94f5 100644 --- a/docs/404.html +++ b/docs/404.html @@ -32,7 +32,7 @@ spsurvey - 5.5.0 + 5.5.1 @@ -109,7 +109,7 @@

[docs/404.html: regenerated pkgdown page; only the package version (5.5.0 -> 5.5.1) and the footer (pkgdown 2.0.6 -> 2.0.7) changed]

diff --git a/docs/LICENSE.html b/docs/LICENSE.html index f5cf6e7..c2bc5a0 100644 --- a/docs/LICENSE.html +++ b/docs/LICENSE.html @@ -17,7 +17,7 @@ spsurvey - 5.5.0 + 5.5.1 @@ -66,6 +66,7 @@

[docs/LICENSE.html: regenerated pkgdown page; the standard GPL-3 license text and pkgdown header/footer boilerplate are omitted here]

Site built with pkgdown 2.0.7.

diff --git a/docs/articles/EDA.html b/docs/articles/EDA.html index 1830d95..2b44289 100644 --- a/docs/articles/EDA.html +++ b/docs/articles/EDA.html @@ -33,7 +33,7 @@ spsurvey - 5.5.0 + 5.5.1 @@ -87,8 +87,10 @@

Introduction

Before proceeding, we load spsurvey by running

-

The summary() and plot() functions in spsurvey are used to summarize and visualize sampling frames, design sites, and analysis data. Both functions use a formula argument that specifies the variables to summarize or visualize. These functions behave differently for one-sided and two-sided formulas. To learn more about formulas in R, run ?formula. Only the core functionality of summary() and plot() will be covered in this vignette, so to learn more about these functions, run ?summary and ?plot. The sp_summary() and sp_plot() functions can equivalently be used in place of plot() and summary(), respectively (sp_summary() and sp_plot() are currently maintained for backwards compatibility with previous spsurvey versions).

-

The plot() function in spsurvey is built on the plot() function in sf. spsurvey’s plot() function accommodates all the arguments in sf’s plot() function and adds a few additional features. To learn more about the plot() function in sf, run ?plot.sf().

+library(spsurvey)
+

The summary() and plot() functions in +spsurvey are used to summarize and visualize sampling frames, design +sites, and analysis data. Both functions use a formula argument that +specifies the variables to summarize or visualize. These functions +behave differently for one-sided and two-sided formulas. To learn more +about formulas in R, run ?formula. Only the core +functionality of summary() and plot() will be +covered in this vignette, so to learn more about these functions, run +?summary and ?plot. The +sp_summary() and sp_plot() functions can +equivalently be used in place of plot() and +summary(), respectively (sp_summary() and +sp_plot() are currently maintained for backwards +compatibility with previous spsurvey versions).

+

The plot() function in spsurvey is built on the +plot() function in sf. spsurvey’s plot() +function accommodates all the arguments in sf’s plot() +function and adds a few additional features. To learn more about the +plot() function in sf, run ?plot.sf().

Sampling frames

Summarizing and visualizing the sampling frame is often helpful to better understand your data and inform additional survey design options (e.g. stratification). To use plot() or sp_summary(), sampling frames must either be an sf object or a data frame with x-coordinates, y-coordinates, and a crs (coordinate reference system).

The NE_Lakes data in spsurvey is a sampling frame (as an sf object) that contains lakes from the Northeastern United States. There are three variables in NE_Lakes you will use next:

  1. AREA_CAT: lake area categories (small and large)
  2. ELEV: lake elevation
  3. ELEV_CAT: lake elevation categories (low and high)

@@ -123,231 +151,323 @@

Before summarizing or visualizing a sampling frame, turn it into an object using sp_frame():

NE_Lakes <- sp_frame(NE_Lakes)

One-sided formulas

One-sided formulas are used to summarize and visualize the distributions of variables. The variables of interest should be placed on the right-hand side of the formula. To summarize the distribution of ELEV, run

summary(NE_Lakes, formula = ~ ELEV)
#>    total          ELEV       
#>  total:195   Min.   :  0.00  
#>              1st Qu.: 21.93  
#>              Median : 69.09  
#>              Mean   :127.39  
#>              3rd Qu.:203.25  
#>              Max.   :561.41

The output contains two columns: total and ELEV. The total column returns the total number of lakes, functioning as an “intercept” to the formula (it can be removed by supplying - 1 to the formula). The ELEV column returns a numerical summary of lake elevation. To visualize ELEV, run

plot(NE_Lakes, formula = ~ ELEV)
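
As an aside, the - 1 device mentioned above would be used like this (a minimal sketch of dropping the total column; output not shown):

summary(NE_Lakes, formula = ~ ELEV - 1)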

To summarize the distribution of ELEV_CAT, run

summary(NE_Lakes, formula = ~ ELEV_CAT)
#>    total     ELEV_CAT  
#>  total:195   low :112  
#>              high: 83

The ELEV_CAT column returns the number of lakes in each elevation category. To visualize ELEV_CAT, run

plot(NE_Lakes, formula = ~ ELEV_CAT, key.width = lcm(3))

The key.width argument extends the plot’s margin to fit the legend text nicely within the plot. The plot’s default title is the formula argument, though this can be changed using the main argument to plot().

The formula used by summary() and plot() is quite flexible. Additional variables are included using +:

summary(NE_Lakes, formula = ~ ELEV_CAT + AREA_CAT)
#>    total     ELEV_CAT    AREA_CAT  
#>  total:195   low :112   small:135  
#>              high: 83   large: 60

The plot() function returns two plots – one for ELEV_CAT and another for AREA_CAT:

plot(NE_Lakes, formula = ~ ELEV_CAT + AREA_CAT, key.width = lcm(3))
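
Returning briefly to plot titles: a sketch of overriding the default title with the main argument mentioned above (the title text is illustrative only; output not shown):

plot(NE_Lakes, formula = ~ ELEV_CAT, main = "Lake elevation categories", key.width = lcm(3))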

Interactions are included using the interaction operator, :. The interaction operator returns the interaction between variables and is most useful when used with categorical variables. To summarize the interaction between ELEV_CAT and AREA_CAT, run

summary(NE_Lakes, formula = ~ ELEV_CAT:AREA_CAT)
#>    total      ELEV_CAT:AREA_CAT
#>  total:195   low:small :82     
#>              high:small:53     
#>              low:large :30     
#>              high:large:30

Levels of each variable are separated by :. For example, there are 82 lakes that are in the low elevation category and the small area category. To visualize this interaction, run

plot(NE_Lakes, formula = ~ ELEV_CAT:AREA_CAT, key.width = lcm(3))

The formula accommodates the * operator, which combines the + and : operators. For example, ELEV_CAT*AREA_CAT is shorthand for ELEV_CAT + AREA_CAT + ELEV_CAT:AREA_CAT. The formula also accommodates the . operator, which is shorthand for all variables separated by +.
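
For instance, a minimal sketch of the * shorthand just described (output not shown):

summary(NE_Lakes, formula = ~ ELEV_CAT * AREA_CAT)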

Two-sided formulas

Two-sided formulas are used to summarize the distribution of a left-hand side variable for each level of each right-hand side variable. To summarize the distribution of ELEV for each level of AREA_CAT, run

summary(NE_Lakes, formula = ELEV ~ AREA_CAT)
#> ELEV by total: 
#>       Min. 1st Qu. Median     Mean 3rd Qu.   Max.
#> total    0  21.925  69.09 127.3862 203.255 561.41
#> 
#> ELEV by AREA_CAT: 
#>       Min. 1st Qu.  Median     Mean  3rd Qu.   Max.
#> small 0.00   19.64  59.660 117.4473 176.1700 561.41
#> large 0.01   26.75 102.415 149.7487 241.2025 537.84

To visualize the distribution of ELEV for each level of AREA_CAT, run

plot(NE_Lakes, formula = ELEV ~ AREA_CAT)

To only summarize or visualize a particular level of a single right-hand side variable, use the onlyshow argument:

summary(NE_Lakes, formula = ELEV ~ AREA_CAT, onlyshow = "small")
#> ELEV by AREA_CAT: 
#>       Min. 1st Qu. Median     Mean 3rd Qu.   Max.
#> small    0   19.64  59.66 117.4473  176.17 561.41

plot(NE_Lakes, formula = ELEV ~ AREA_CAT, onlyshow = "small")

To summarize the distribution of ELEV_CAT for each level of AREA_CAT, run

summary(NE_Lakes, formula = ELEV_CAT ~ AREA_CAT)
#> ELEV_CAT by total: 
#>       low high
#> total 112   83
#> 
#> ELEV_CAT by AREA_CAT: 
#>       low high
#> small  82   53
#> large  30   30

To visualize the distribution of ELEV_CAT for each level of AREA_CAT, run

plot(NE_Lakes, formula = ELEV_CAT ~ AREA_CAT, key.width = lcm(3))

Adjusting graphical parameters

There are three arguments in plot() that can adjust graphical parameters:

  1. var_args adjusts graphical parameters simultaneously for all levels of a variable
  2. varlevel_args adjusts graphical parameters uniquely for each level of a variable
  3. ... adjusts graphical parameters simultaneously for all levels of all variables
The var_args and varlevel_args arguments take lists whose names match variable names in the formula. For varlevel_args, each list element must have an element named levels that matches the variable’s levels. The following example combines all three graphical parameter adjustment arguments:

list1 <- list(main = "Elevation Categories", pal = rainbow)
list2 <- list(main = "Area Categories")
list3 <- list(levels = c("small", "large"), pch = c(4, 19))
plot(
  NE_Lakes,
  formula = ~ ELEV_CAT + AREA_CAT,
  var_args = list(ELEV_CAT = list1, AREA_CAT = list2),
  varlevel_args = list(AREA_CAT = list3),
  cex = 0.75,
  key.width = lcm(3)
)

var_args uses list1 to give the ELEV_CAT visualization a new title and color palette; var_args uses list2 to give the AREA_CAT visualization a new title; varlevel_args uses list3 to give the AREA_CAT visualization different shapes for the small and large levels; ... uses cex = 0.75 to reduce the size of all points; and ... uses key.width to adjust legend spacing for all visualizations.

If a two-sided formula is used, it is possible to adjust graphical parameters of the left-hand side variable for all levels of a right-hand side variable. This occurs when a sublist matching the structure of varlevel_args is used as an argument to var_args. In this next example, different shapes are used for the small and large levels of AREA_CAT for all levels of ELEV_CAT:

sublist <- list(AREA_CAT = list3)
plot(
  NE_Lakes,
  formula = AREA_CAT ~ ELEV_CAT,
  var_args = list(ELEV_CAT = sublist),
  key.width = lcm(3)
)

Design sites

Design sites (output from the grts() or irs() functions) can be summarized and visualized using summary() and plot() very similarly to how sampling frames were summarized and visualized in the previous section. Soon you will use the grts() function to select a spatially balanced sample. The grts() function does incorporate randomness, so to match your results with this output exactly you will need to set a reproducible seed by running

set.seed(51)

First we will obtain some design sites. To select an equal probability GRTS sample of size 50 with 10 reverse hierarchically ordered replacement sites, run

eqprob_rho <- grts(NE_Lakes, n_base = 50, n_over = 10)

Similar to summary() and plot() for sampling frames, summary() and plot() for design sites use a formula. The formula should include siteuse, which is the name of the variable in the design sites object that indicates the type of each site. The default formula for summary() and plot() is ~siteuse, which summarizes or visualizes the sites objects in the design sites object. By default, the formula is applied to all non-NULL sites objects (in eqprob_rho, the non-NULL sites objects are sites_base (for the base sites) and sites_over (for the reverse hierarchically ordered replacement sites)).

summary(eqprob_rho)
#>    total    siteuse  
#>  total:60   Base:50  
#>             Over:10

plot(eqprob_rho, key.width = lcm(3))

The sampling frame may be included as an argument to the plot() function:

plot(eqprob_rho, NE_Lakes, key.width = lcm(3))

When you include siteuse as a left-hand side variable (siteuse is treated as a categorical variable), you can summarize and visualize the sites object for each level of each right-hand side variable:

summary(eqprob_rho, formula = siteuse ~ AREA_CAT)
#> siteuse by total: 
#>       Base Over
#> total   50   10
#> 
#> siteuse by AREA_CAT: 
#>       Base Over
#> small   35    7
#> large   15    3

plot(eqprob_rho, formula = siteuse ~ AREA_CAT, key.width = lcm(3))

You can also summarize and visualize a left-hand side variable for each level of siteuse:

summary(eqprob_rho, formula = ELEV ~ siteuse)
#> ELEV by total: 
#>       Min. 1st Qu. Median    Mean  3rd Qu.   Max.
#> total 0.03  26.385 65.535 135.364 214.2075 537.84
#> 
#> ELEV by siteuse: 
#>      Min. 1st Qu. Median     Mean 3rd Qu.   Max.
#> Base 0.68 29.4850  81.76 148.0362 263.640 537.84
#> Over 0.03 15.1275  54.49  72.0030 119.365 209.25

plot(eqprob_rho, formula = ELEV ~ siteuse)

Analysis data

sp_summary() and plot() work for analysis data the same way they do for sampling frames. The NLA_PNW data in spsurvey is analysis data (as an sf object) from lakes in California, Oregon, and Washington. There are two variables in NLA_PNW you will use next:

  1. STATE: state name (California, Washington, and Oregon)
  2. NITR_COND: nitrogen content categories (Poor, Fair, and Good)
Before summarizing or visualizing analysis data, turn it into an object using sp_frame():

NLA_PNW <- sp_frame(NLA_PNW)

To summarize and visualize NITR_COND across all states, run

summary(NLA_PNW, formula = ~ NITR_COND)
#>    total    NITR_COND
#>  total:96   Fair:24  
#>             Good:38  
#>             Poor:34

plot(NLA_PNW, formula = ~ NITR_COND, key.width = lcm(3))

Suppose the sampling design was stratified by STATE. To summarize and visualize NITR_COND by STATE, run

summary(NLA_PNW, formula = NITR_COND ~ STATE)
#> NITR_COND by total: 
#>       Fair Good Poor
#> total   24   38   34
#> 
#> NITR_COND by STATE: 
#>            Fair Good Poor
#> California    6    8    5
#> Oregon        8   26   13
#> Washington   10    4   16

plot(NLA_PNW, formula = NITR_COND ~ STATE, key.width = lcm(3))

@@ -369,7 +489,7 @@

Analysis data

-Site built with pkgdown 2.0.6.
+Site built with pkgdown 2.0.7.

diff --git a/docs/articles/analysis.html b/docs/articles/analysis.html
index 913641f..c5dca1f 100644
--- a/docs/articles/analysis.html
+++ b/docs/articles/analysis.html
@@ -33,7 +33,7 @@
 spsurvey
-5.5.0
+5.5.1
@@ -88,7 +88,8 @@

Introduction

spsurvey’s analysis functions are used to analyze data in a variety of contexts. We focus mainly on using the NLA_PNW analysis data to introduce some of these analysis functions. The NLA_PNW data contains response variables measured at several lakes in the Pacific Northwest Region (PNW) of the United States. There are several variables in NLA_PNW you will use throughout this vignette:

  • SITE_ID: a unique site identifier
  • WEIGHT: the design weights
  • URBAN: urban land categories (Urban and Non-Urban)
  • STATE: state name (California, Oregon, and Washington)
  • BMMI: benthic macroinvertebrate multi-metric index
  • BMMI_COND: benthic macroinvertebrate multi-metric index condition categories (Poor and Good)
  • PHOS_COND: phosphorous condition categories (Poor and Good)
  • NITR_COND: nitrogen condition categories (Poor, Fair, and Good)

Before proceeding, we load spsurvey by running

library(spsurvey)

Categorical Variable Analysis

Categorical variables are analyzed in spsurvey using the cat_analysis() function. The cat_analysis() function estimates the proportion of observations and the total units (i.e. extent) that belong to each level of the categorical variable (total units refer to the total number (point resources), total line length (linear network), or total area (areal network)). Several useful pieces of information are returned by cat_analysis(), including estimates, standard errors, margins of error, and confidence intervals. The analysis results contain columns with .P and .U suffixes. The .P suffix corresponds to estimates of proportions for each category, while the .U suffix corresponds to estimates of total units (i.e. extent) for each category.

Unstratified Analysis

To estimate the proportion of total lakes in each nitrogen condition category and the total number of lakes in each nitrogen condition category, run

cat_ests <- cat_analysis(
  NLA_PNW,
  siteID = "SITE_ID",
  vars = "NITR_COND",
  weight = "WEIGHT"
)
cat_ests
#>        Type Subpopulation Indicator Category nResp Estimate.P StdError.P
#> 1 All_Sites     All Sites NITR_COND     Fair    24   23.69392   6.194024
#> 2 All_Sites     All Sites NITR_COND     Good    38   51.35111   7.430172
#> 3 All_Sites     All Sites NITR_COND     Poor    34   24.95496   5.919180
#> 4 All_Sites     All Sites NITR_COND    Total    96  100.00000   0.000000
#>   MarginofError.P LCB95Pct.P UCB95Pct.P Estimate.U StdError.U MarginofError.U
#> 1        12.14006   11.55386   35.83399   2530.428   693.5593        1359.351
#> 2        14.56287   36.78824   65.91398   5484.120  1223.3708        2397.763
#> 3        11.60138   13.35359   36.55634   2665.103   658.0966        1289.846
#> 4         0.00000  100.00000  100.00000  10679.652  1416.2707        2775.840
#>   LCB95Pct.U UCB95Pct.U
#> 1   1171.077   3889.780
#> 2   3086.357   7881.883
#> 3   1375.258   3954.949
#> 4   7903.812  13455.491

The estimate of the proportion of lakes in Good condition is 51.35% with a 95% confidence interval of (36.8%, 65.9%), while the estimate of the total number of lakes in Good condition is 5484 lakes with a 95% confidence interval of (3086, 7882). In each case, the estimated standard error and margin of error are given. The confidence level can be changed using the conf argument. If more than one categorical variable is of interest, then vars can be a vector of variables and separate analyses are performed for each variable.

Sometimes the goal is to estimate proportions and totals separately for different subsets of the population – these subsets are called subpopulations. To estimate the proportion of total lakes in each nitrogen condition category and the total number of lakes in each nitrogen condition category separately for California, Oregon, and Washington lakes, run

cat_ests_sp <- cat_analysis(
  NLA_PNW,
  siteID = "SITE_ID",
  vars = "NITR_COND",
  weight = "WEIGHT",
  subpop = "STATE"
)
cat_ests_sp
#>     Type Subpopulation Indicator Category nResp Estimate.P StdError.P
#> 1  STATE    California NITR_COND     Fair     6   8.239162   4.712515
#> 2  STATE    California NITR_COND     Good     8  73.722638  13.359879
#> 3  STATE    California NITR_COND     Poor     5  18.038200  11.909638
#> 4  STATE    California NITR_COND    Total    19 100.000000   0.000000
#> 5  STATE        Oregon NITR_COND     Fair     8  27.152211   9.763817
#> 6  STATE        Oregon NITR_COND     Good    26  59.670307   9.717093
#> 7  STATE        Oregon NITR_COND     Poor    13  13.177483   4.901892
#> 8  STATE        Oregon NITR_COND    Total    47 100.000000   0.000000
#> 9  STATE    Washington NITR_COND     Fair    10  30.396139  11.938582
#> 10 STATE    Washington NITR_COND     Good     4  22.711979  11.878964
#> 11 STATE    Washington NITR_COND     Poor    16  46.891882  13.041148
#> 12 STATE    Washington NITR_COND    Total    30 100.000000   0.000000
#>    MarginofError.P LCB95Pct.P UCB95Pct.P Estimate.U StdError.U MarginofError.U
#> 1         9.236359   0.000000   17.47552   208.4605   87.63415        171.7598
#> 2        26.184881  47.537757   99.90752  1865.2692  940.12797       1842.6170
#> 3        23.342462   0.000000   41.38066   456.3876  299.29104        586.5997
#> 4         0.000000 100.000000  100.00000  2530.1172  978.65831       1918.1350
#> 5        19.136729   8.015482   46.28894  1298.8470  526.66732       1032.2490
#> 6        19.045152  40.625155   78.71546  2854.3752  674.02641       1321.0675
#> 7         9.607532   3.569950   22.78501   630.3551  198.49966        389.0522
#> 8         0.000000 100.000000  100.00000  4783.5773  706.53216       1384.7776
#> 9        23.399190   6.996949   53.79533  1023.1210  437.69351        857.8635
#> 10       23.282341   0.000000   45.99432   764.4755  453.48899        888.8221
#> 11       25.560181  21.331701   72.45206  1578.3606  556.63044       1090.9756
#> 12        0.000000 100.000000  100.00000  3365.9571  741.89397       1454.0855
#>    LCB95Pct.U UCB95Pct.U
#> 1    36.70069   380.2202
#> 2    22.65222  3707.8861
#> 3     0.00000  1042.9873
#> 4   611.98220  4448.2523
#> 5   266.59801  2331.0960
#> 6  1533.30774  4175.4427
#> 7   241.30288  1019.4072
#> 8  3398.79970  6168.3549
#> 9   165.25751  1880.9845
#> 10    0.00000  1653.2976
#> 11  487.38500  2669.3362
#> 12 1911.87165  4820.0426

If more than one type of subpopulation is of interest, then subpop can be a vector of subpopulation variables and separate analyses are performed for each subpopulation. If both vars and subpops are vectors, separate analyses are performed for each variable and subpopulation combination.

Analysis results for all sites (ignoring subpopulations) can be presented alongside the subpopulation analysis results using the All_Sites argument:

cat_ests_sp <- cat_analysis(
  NLA_PNW,
  siteID = "SITE_ID",
  vars = "NITR_COND",
  weight = "WEIGHT",
  subpop = "STATE",
  All_Sites = TRUE
)
cat_ests_sp
#>         Type Subpopulation Indicator Category nResp Estimate.P StdError.P
#> 1      STATE    California NITR_COND     Fair     6   8.239162   4.712515
#> 2      STATE    California NITR_COND     Good     8  73.722638  13.359879
#> 3      STATE    California NITR_COND     Poor     5  18.038200  11.909638
#> 4      STATE    California NITR_COND    Total    19 100.000000   0.000000
#> 5      STATE        Oregon NITR_COND     Fair     8  27.152211   9.763817
#> 6      STATE        Oregon NITR_COND     Good    26  59.670307   9.717093
#> 7      STATE        Oregon NITR_COND     Poor    13  13.177483   4.901892
#> 8      STATE        Oregon NITR_COND    Total    47 100.000000   0.000000
#> 9      STATE    Washington NITR_COND     Fair    10  30.396139  11.938582
#> 10     STATE    Washington NITR_COND     Good     4  22.711979  11.878964
#> 11     STATE    Washington NITR_COND     Poor    16  46.891882  13.041148
#> 12     STATE    Washington NITR_COND    Total    30 100.000000   0.000000
#> 13 All_Sites     All Sites NITR_COND     Fair    24  23.693923   6.194024
#> 14 All_Sites     All Sites NITR_COND     Good    38  51.351112   7.430172
#> 15 All_Sites     All Sites NITR_COND     Poor    34  24.954965   5.919180
#> 16 All_Sites     All Sites NITR_COND    Total    96 100.000000   0.000000
#>    MarginofError.P LCB95Pct.P UCB95Pct.P Estimate.U StdError.U MarginofError.U
#> 1         9.236359   0.000000   17.47552   208.4605   87.63415        171.7598
#> 2        26.184881  47.537757   99.90752  1865.2692  940.12797       1842.6170
#> 3        23.342462   0.000000   41.38066   456.3876  299.29104        586.5997
#> 4         0.000000 100.000000  100.00000  2530.1172  978.65831       1918.1350
#> 5        19.136729   8.015482   46.28894  1298.8470  526.66732       1032.2490
#> 6        19.045152  40.625155   78.71546  2854.3752  674.02641       1321.0675
#> 7         9.607532   3.569950   22.78501   630.3551  198.49966        389.0522
#> 8         0.000000 100.000000  100.00000  4783.5773  706.53216       1384.7776
#> 9        23.399190   6.996949   53.79533  1023.1210  437.69351        857.8635
#> 10       23.282341   0.000000   45.99432   764.4755  453.48899        888.8221
#> 11       25.560181  21.331701   72.45206  1578.3606  556.63044       1090.9756
#> 12        0.000000 100.000000  100.00000  3365.9571  741.89397       1454.0855
#> 13       12.140064  11.553860   35.83399  2530.4285  693.55933       1359.3513
#> 14       14.562870  36.788242   65.91398  5484.1199 1223.37078       2397.7627
#> 15       11.601379  13.353585   36.55634  2665.1033  658.09658       1289.8456
#> 16        0.000000 100.000000  100.00000 10679.6517 1416.27075       2775.8397
#>    LCB95Pct.U UCB95Pct.U
#> 1    36.70069   380.2202
#> 2    22.65222  3707.8861
#> 3     0.00000  1042.9873
#> 4   611.98220  4448.2523
#> 5   266.59801  2331.0960
#> 6  1533.30774  4175.4427
#> 7   241.30288  1019.4072
#> 8  3398.79970  6168.3549
#> 9   165.25751  1880.9845
#> 10    0.00000  1653.2976
#> 11  487.38500  2669.3362
#> 12 1911.87165  4820.0426
#> 13 1171.07716  3889.7798
#> 14 3086.35722  7881.8826
#> 15 1375.25770  3954.9489
#> 16 7903.81199 13455.4913
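
As a closing note on the options mentioned in this subsection, conf, a vector of vars, and subpop can be combined in a single call. A minimal sketch (a 90% confidence level is chosen only for illustration; PHOS_COND is the phosphorous condition variable described in the Introduction; output omitted):

cat_ests_90 <- cat_analysis(
  NLA_PNW,
  siteID = "SITE_ID",
  vars = c("NITR_COND", "PHOS_COND"),
  subpop = "STATE",
  weight = "WEIGHT",
  conf = 90
)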

Stratified Analysis

To estimate the proportion of total lakes in each nitrogen condition category and the total number of lakes in each nitrogen condition category stratified by URBAN category (whether the lake is classified as Urban or Non-Urban), run

strat_cat_ests <- cat_analysis(
  NLA_PNW,
  siteID = "SITE_ID",
  vars = "NITR_COND",
  weight = "WEIGHT",
  stratumID = "URBAN"
)
strat_cat_ests
#>        Type Subpopulation Indicator Category nResp Estimate.P StdError.P
#> 1 All_Sites     All Sites NITR_COND     Fair    24   23.69392   6.027083
#> 2 All_Sites     All Sites NITR_COND     Good    38   51.35111   7.472377
#> 3 All_Sites     All Sites NITR_COND     Poor    34   24.95496   5.882487
#> 4 All_Sites     All Sites NITR_COND    Total    96  100.00000   0.000000
#>   MarginofError.P LCB95Pct.P UCB95Pct.P Estimate.U StdError.U MarginofError.U
#> 1        11.81287   11.88106   35.50679   2530.428   683.8837        1340.387
#> 2        14.64559   36.70552   65.99670   5484.120  1229.9485        2410.655
#> 3        11.52946   13.42550   36.48443   2665.103   653.6357        1281.102
#> 4         0.00000  100.00000  100.00000  10679.652  1440.4796        2823.288
#>   LCB95Pct.U UCB95Pct.U
#> 1   1190.041   3870.816
#> 2   3073.465   7894.775
#> 3   1384.001   3946.206
#> 4   7856.363  13502.940

To then compute these estimates separately for California, Oregon, and Washington, run

strat_cat_ests_sp <- cat_analysis(
  NLA_PNW,
  siteID = "SITE_ID",
  vars = "NITR_COND",
  weight = "WEIGHT",
  stratumID = "URBAN",
  subpop = "STATE"
)
strat_cat_ests_sp
#>     Type Subpopulation Indicator Category nResp Estimate.P StdError.P
#> 1  STATE    California NITR_COND     Fair     6   8.239162   5.481100
#> 2  STATE    California NITR_COND     Good     8  73.722638  13.653267
#> 3  STATE    California NITR_COND     Poor     5  18.038200  12.512366
#> 4  STATE    California NITR_COND    Total    19 100.000000   0.000000
#> 5  STATE        Oregon NITR_COND     Fair     8  27.152211   9.592356
#> 6  STATE        Oregon NITR_COND     Good    26  59.670307   9.845022
#> 7  STATE        Oregon NITR_COND     Poor    13  13.177483   5.325784
#> 8  STATE        Oregon NITR_COND    Total    47 100.000000   0.000000
#> 9  STATE    Washington NITR_COND     Fair    10  30.396139  11.383474
#> 10 STATE    Washington NITR_COND     Good     4  22.711979   9.039351
#> 11 STATE    Washington NITR_COND     Poor    16  46.891882  12.109215
#> 12 STATE    Washington NITR_COND    Total    30 100.000000   0.000000
#>    MarginofError.P LCB95Pct.P UCB95Pct.P Estimate.U StdError.U MarginofError.U
#> 1         10.74276   0.000000   18.98192   208.4605   87.91849        172.3171
#> 2         26.75991  46.962726  100.00000  1865.2692  953.91452       1869.6381
#> 3         24.52379   0.000000   42.56199   456.3876  306.37270        600.4795
#> 4          0.00000 100.000000  100.00000  2530.1172  994.94678       1950.0599
#> 5         18.80067   8.351538   45.95288  1298.8470  531.02949       1040.7987
#> 6         19.29589  40.374417   78.96620  2854.3752  685.86488       1344.2705
#> 7         10.43835   2.739137   23.61583   630.3551  177.79364        348.4691
#> 8          0.00000 100.000000  100.00000  4783.5773  737.53853       1445.5490
#> 9         22.31120   8.084941   52.70734  1023.1210  435.45383        853.4738
#> 10        17.71680   4.995177   40.42878   764.4755  433.94100        850.5087
#> 11        23.73363  23.158256   70.62551  1578.3606  554.91047       1087.6045
#> 12         0.00000 100.000000  100.00000  3365.9571  748.29764       1466.6364
#>    LCB95Pct.U UCB95Pct.U
#> 1     36.1434   380.7775
#> 2      0.0000  3734.9073
#> 3      0.0000  1056.8671
#> 4    580.0574  4480.1771
#> 5    258.0483  2339.6457
#> 6   1510.1048  4198.6457
#> 7    281.8859   978.8242
#> 8   3338.0283  6229.1262
#> 9    169.6472  1876.5948
#> 10     0.0000  1614.9842
#> 11   490.7561  2665.9652
#> 12  1899.3207  4832.5935

Continuous Variable Analysis

Continuous variables are analyzed in spsurvey using the cont_analysis() function. The cont_analysis() function estimates cumulative distribution functions (CDFs), percentiles, and means of continuous variables. By default, all these quantities are estimated (though this can be changed using the statistics argument to cont_analysis()). For the quantities requiring estimation, several useful pieces of information are returned by cont_analysis(), including estimates, standard errors, margins of error, and confidence intervals. The .P suffix corresponds to estimates of proportions for each variable, while the .U suffix corresponds to estimates of total units (i.e. extent) for each variable.
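
For example, if only the mean estimates are needed, the statistics argument mentioned above might be used as sketched here (the value "Mean" is an assumption about the accepted options; output omitted):

cont_mean_only <- cont_analysis(
  NLA_PNW,
  siteID = "SITE_ID",
  vars = "BMMI",
  weight = "WEIGHT",
  statistics = "Mean"
)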

Unstratified Analysis

To estimate the cumulative distribution function (CDF), certain percentiles, and means of BMMI, run

cont_ests <- cont_analysis(
  NLA_PNW,
  siteID = "SITE_ID",
  vars = "BMMI",
  weight = "WEIGHT"
)

To view the analysis results for the mean estimates, run

cont_ests$Mean
#>        Type Subpopulation Indicator nResp Estimate StdError MarginofError
#> 1 All_Sites     All Sites      BMMI    96 56.50929 1.782278        3.4932
#>   LCB95Pct UCB95Pct
#> 1 53.01609 60.00249

Similarly, the CDF and select percentile estimates can be viewed (the output is omitted here) by running

cont_ests$CDF
cont_ests$Pct
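
Because the CDF table can be fairly long, previewing its first rows with base R's head() is a convenient, spsurvey-agnostic idiom:

head(cont_ests$CDF)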

To visualize the CDF estimates, run

plot(cont_ests$CDF)

The solid line indicates the CDF estimates, and the dashed lines indicate lower and upper 95% confidence interval bounds for the CDF estimates. cdf_plot() can equivalently be used in place of plot() (cdf_plot() is currently maintained for backwards compatibility with previous spsurvey versions).

To estimate the CDF, certain percentiles, and means of BMMI separately for each state, run

cont_ests_sp <- cont_analysis(
  NLA_PNW,
  siteID = "SITE_ID",
  vars = "BMMI",
  weight = "WEIGHT",
  subpop = "STATE"
)

To view the analysis results for the mean estimates, run

cont_ests_sp$Mean
#>    Type Subpopulation Indicator nResp Estimate StdError MarginofError LCB95Pct
#> 1 STATE    California      BMMI    19 50.48964 4.049094      7.936079 42.55357
#> 2 STATE        Oregon      BMMI    47 61.29675 2.581032      5.058730 56.23802
#> 3 STATE    Washington      BMMI    30 54.23036 3.143924      6.161977 48.06838
#>   UCB95Pct
#> 1 58.42572
#> 2 66.35548
#> 3 60.39234

Stratified Analysis

To estimate the CDF, certain percentiles, and means of BMMI for a design stratified by URBAN category, run

strat_cont_ests <- cont_analysis(
  NLA_PNW,
  siteID = "SITE_ID",
  vars = "BMMI",
  weight = "WEIGHT",
  stratumID = "URBAN"
)

To view the analysis results for the mean estimates, run

strat_cont_ests$Mean
#>        Type Subpopulation Indicator nResp Estimate StdError MarginofError
#> 1 All_Sites     All Sites      BMMI    96 56.50929 1.795959      3.520015
#>   LCB95Pct UCB95Pct
#> 1 52.98928 60.02931

To then compute these estimates separately for each state, run

strat_cont_ests_sp <- cont_analysis(
  NLA_PNW,
  siteID = "SITE_ID",
  vars = "BMMI",
  weight = "WEIGHT",
  stratumID = "URBAN",
  subpop = "STATE"
)

To view the analysis results for the mean estimates, run

strat_cont_ests_sp$Mean
#>    Type Subpopulation Indicator nResp Estimate StdError MarginofError LCB95Pct
#> 1 STATE    California      BMMI    19 50.48964 4.110046      8.055543 42.43410
#> 2 STATE        Oregon      BMMI    47 61.29675 1.630357      3.195441 58.10131
#> 3 STATE    Washington      BMMI    30 54.23036 2.930517      5.743708 48.48665
#>   UCB95Pct
#> 1 58.54519
#> 2 64.49219
#> 3 59.97407

Additional Analysis Approaches

Alongside the cat_analysis() and cont_analysis() functions, spsurvey offers functions for estimating attributable risk, relative risk, risk difference, change, and trend. Attributable risk analysis, relative risk analysis, and risk difference analysis quantify the attributable risk, relative risk, and risk difference, respectively, of environmental resources being in poor condition after exposure to a stressor. Attributable risk analysis is performed using the attrisk_analysis() function, relative risk analysis is performed using the relrisk_analysis() function, and risk difference analysis is performed using the diffrisk_analysis() function. Change analysis captures the behavior of environmental resources between two samples, while trend analysis generalizes this approach to include more than two samples. Often, change and trend analyses are performed to study an environmental resource through time. Change analysis is performed using the change_analysis() function, and trend analysis is performed using the trend_analysis() function. The attrisk_analysis(), relrisk_analysis(), diffrisk_analysis(), change_analysis(), and trend_analysis() functions all share very similar syntax with the cat_analysis() and cont_analysis() functions.

Attributable Risk, Relative Risk, and Risk Difference Analysis

-

The attributable risk is defined as \[1 - \frac{P(Response = Poor | Stressor = Good)}{P(Response = Poor)},\] where \(P(\cdot)\) is a probability and \(P(\cdot | \cdot)\) is a conditional probability. The attributable risk measures the proportion of the response variable in poor condition that could be eliminated if the stressor was always in good condition. To estimate the attributable risk of benthic macroinvertebrates with a phosphorous condition stressor, run

+

The attributable risk is defined as \[1 - \frac{P(Response = Poor | Stressor = Good)}{P(Response = Poor)},\] where \(P(\cdot)\) is a probability and \(P(\cdot | \cdot)\) is a conditional probability. The attributable risk measures the proportion of the response variable in poor condition that could be eliminated if the stressor was always in good condition. To estimate the attributable risk of benthic macroinvertebrates with a phosphorous condition stressor, run

-attrisk_ests <- attrisk_analysis(
-  NLA_PNW, 
-  siteID = "SITE_ID",
-  vars_response = "BMMI_COND",
-  vars_stressor = "PHOS_COND",
-  weight = "WEIGHT"
-)
-attrisk_ests
-#>        Type Subpopulation  Response  Stressor nResp  Estimate StdError_log
-#> 1 All_Sites     All Sites BMMI_COND PHOS_COND    96 0.6201042     0.624808
-#>   MarginofError_log  LCB95Pct  UCB95Pct WeightTotal Count_RespPoor_StressPoor
-#> 1          1.224601 -0.292713 0.8883582    10679.65                         5
-#>   Count_RespPoor_StressGood Count_RespGood_StressPoor Count_RespGood_StressGood
-#> 1                         7                        18                        66
-#>   Prop_RespPoor_StressPoor Prop_RespPoor_StressGood Prop_RespGood_StressPoor
-#> 1               0.03971418               0.01738181                 0.158931
-#>   Prop_RespGood_StressGood
-#> 1                 0.783973
-

The relative risk is defined as \[\frac{P(Response = Poor | Stressor = Poor)}{P(Response = Poor | Stressor = Good)},\] which measures the risk of the response variable being in poor condition relative to the stressor’s condition. To estimate the relative risk of benthic macroinvertebrates being in poor condition with a phosphorous condition category stressor, run

+attrisk_ests <- attrisk_analysis(
+  NLA_PNW,
+  siteID = "SITE_ID",
+  vars_response = "BMMI_COND",
+  vars_stressor = "PHOS_COND",
+  weight = "WEIGHT"
+)
+attrisk_ests
+#>        Type Subpopulation  Response  Stressor nResp  Estimate StdError_log
+#> 1 All_Sites     All Sites BMMI_COND PHOS_COND    96 0.6201042     0.624808
+#>   MarginofError_log  LCB95Pct  UCB95Pct WeightTotal Count_RespPoor_StressPoor
+#> 1          1.224601 -0.292713 0.8883582    10679.65                         5
+#>   Count_RespPoor_StressGood Count_RespGood_StressPoor Count_RespGood_StressGood
+#> 1                         7                        18                        66
+#>   Prop_RespPoor_StressPoor Prop_RespPoor_StressGood Prop_RespGood_StressPoor
+#> 1               0.03971418               0.01738181                 0.158931
+#>   Prop_RespGood_StressGood
+#> 1                 0.783973
+

The relative risk is defined as \[\frac{P(Response = Poor | Stressor = Poor)}{P(Response = Poor | Stressor = Good)},\] which measures the risk of the response variable being in poor condition relative to the stressor’s condition. To estimate the relative risk of benthic macroinvertebrates being in poor condition with a phosphorous condition category stressor, run

-relrisk_ests <- relrisk_analysis(
-  NLA_PNW, 
-  siteID = "SITE_ID",
-  vars_response = "BMMI_COND",
-  vars_stressor = "PHOS_COND",
-  weight = "WEIGHT"
-)
-relrisk_ests
-#>        Type Subpopulation  Response  Stressor nResp Estimate Estimate_num
-#> 1 All_Sites     All Sites BMMI_COND PHOS_COND    96 9.217166    0.1999252
-#>   Estimate_denom StdError_log MarginofError_log LCB95Pct UCB95Pct WeightTotal
-#> 1     0.02169053    0.8753855          1.715724 1.657555 51.25389    10679.65
-#>   Count_RespPoor_StressPoor Count_RespPoor_StressGood Count_RespGood_StressPoor
-#> 1                         5                         7                        18
-#>   Count_RespGood_StressGood Prop_RespPoor_StressPoor Prop_RespPoor_StressGood
-#> 1                        66               0.03971418               0.01738181
-#>   Prop_RespGood_StressPoor Prop_RespGood_StressGood
-#> 1                 0.158931                 0.783973
-

The risk difference is defined as \[P(Response = Poor | Stressor = Poor) - P(Response = Poor | Stressor = Good),\] which measures the risk of the response variable being in poor condition differenced by the stressor’s condition. To estimate the risk difference of benthic macroinvertebrates being in poor condition with a phosphorous condition category stressor, run

+relrisk_ests <- relrisk_analysis(
+  NLA_PNW,
+  siteID = "SITE_ID",
+  vars_response = "BMMI_COND",
+  vars_stressor = "PHOS_COND",
+  weight = "WEIGHT"
+)
+relrisk_ests
+#>        Type Subpopulation  Response  Stressor nResp Estimate Estimate_num
+#> 1 All_Sites     All Sites BMMI_COND PHOS_COND    96 9.217166    0.1999252
+#>   Estimate_denom StdError_log MarginofError_log LCB95Pct UCB95Pct WeightTotal
+#> 1     0.02169053    0.8753855          1.715724 1.657555 51.25389    10679.65
+#>   Count_RespPoor_StressPoor Count_RespPoor_StressGood Count_RespGood_StressPoor
+#> 1                         5                         7                        18
+#>   Count_RespGood_StressGood Prop_RespPoor_StressPoor Prop_RespPoor_StressGood
+#> 1                        66               0.03971418               0.01738181
+#>   Prop_RespGood_StressPoor Prop_RespGood_StressGood
+#> 1                 0.158931                 0.783973
+

The risk difference is defined as \[P(Response = Poor | Stressor = Poor) - P(Response = Poor | Stressor = Good),\] which measures the risk of the response variable being in poor condition differenced by the stressor’s condition. To estimate the risk difference of benthic macroinvertebrates being in poor condition with a phosphorous condition category stressor, run

-diffrisk_ests <- diffrisk_analysis(
-  NLA_PNW, 
-  siteID = "SITE_ID",
-  vars_response = "BMMI_COND",
-  vars_stressor = "PHOS_COND",
-  weight = "WEIGHT"
-)
-diffrisk_ests
-#>        Type Subpopulation  Response  Stressor nResp  Estimate
-#> 1 All_Sites     All Sites BMMI_COND PHOS_COND    96 0.1782347
-#>   Estimate_StressPoor Estimate_StressGood  StdError MarginofError   LCB95Pct
-#> 1           0.1999252          0.02169053 0.1139557      0.223349 -0.0451143
-#>    UCB95Pct WeightTotal Count_RespPoor_StressPoor Count_RespPoor_StressGood
-#> 1 0.4015837    10679.65                         5                         7
-#>   Count_RespGood_StressPoor Count_RespGood_StressGood Prop_RespPoor_StressPoor
-#> 1                        18                        66               0.03971418
-#>   Prop_RespPoor_StressGood Prop_RespGood_StressPoor Prop_RespGood_StressGood
-#> 1               0.01738181                 0.158931                 0.783973
-

By default, the levels of the variables in vars_response and vars_stressor are assumed to equal "Poor" (event occurs) or "Good" (event does not occur). If those default levels do not match the levels of the variables in vars_response and vars_stressor, the levels of vars_response and vars_stressor must be explicitly stated using the response_levels and stressor_levels arguments, respectively. Similar to cat_analysis() and cont_analysis() from the previous sections, subpopulations and stratification are incorporated using subpops and stratumID, respectively. For more on attributable and relative risk in an environmental resource context, see Van Sickle and Paulsen (2008).

+diffrisk_ests <- diffrisk_analysis(
+  NLA_PNW,
+  siteID = "SITE_ID",
+  vars_response = "BMMI_COND",
+  vars_stressor = "PHOS_COND",
+  weight = "WEIGHT"
+)
+diffrisk_ests
+#>        Type Subpopulation  Response  Stressor nResp  Estimate
+#> 1 All_Sites     All Sites BMMI_COND PHOS_COND    96 0.1782347
+#>   Estimate_StressPoor Estimate_StressGood  StdError MarginofError   LCB95Pct
+#> 1           0.1999252          0.02169053 0.1139557      0.223349 -0.0451143
+#>    UCB95Pct WeightTotal Count_RespPoor_StressPoor Count_RespPoor_StressGood
+#> 1 0.4015837    10679.65                         5                         7
+#>   Count_RespGood_StressPoor Count_RespGood_StressGood Prop_RespPoor_StressPoor
+#> 1                        18                        66               0.03971418
+#>   Prop_RespPoor_StressGood Prop_RespGood_StressPoor Prop_RespGood_StressGood
+#> 1               0.01738181                 0.158931                 0.783973
+

By default, the levels of the variables in vars_response and vars_stressor are assumed to equal "Poor" (event occurs) or "Good" (event does not occur). If those default levels do not match the levels of the variables in vars_response and vars_stressor, the levels of vars_response and vars_stressor must be explicitly stated using the response_levels and stressor_levels arguments, respectively. Similar to cat_analysis() and cont_analysis() from the previous sections, subpopulations and stratification are incorporated using subpops and stratumID, respectively. For more on attributable and relative risk in an environmental resource context, see Van Sickle and Paulsen (2008).
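For illustration, a minimal sketch of an attrisk_analysis() call with non-default levels follows. The data frame, variable names, and level names below are hypothetical, and the list-of-vectors structure passed to response_levels and stressor_levels (with the "Poor"-type level listed first) is an assumption to verify against the function's help page.

# hypothetical data frame and columns; the response_levels/stressor_levels
# structure is assumed -- check ?attrisk_analysis before relying on it
attrisk_custom <- attrisk_analysis(
  my_dframe,
  siteID = "SITE_ID",
  vars_response = "RESP_COND",
  vars_stressor = "STRESS_COND",
  response_levels = list(c("Degraded", "Reference")),
  stressor_levels = list(c("Degraded", "Reference")),
  weight = "WEIGHT"
)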

Change and Trend Analysis

-

To demonstrate change analysis, we use the NRSA_EPA7 data. There are several variables in NRSA_EPA7 you will use next:

+

To demonstrate change analysis, we use the NRSA_EPA7 data. There are several variables in NRSA_EPA7 you will use next:

  • SITE_ID: a unique site identifier
  • WEIGHT: the survey design weights
  • -NITR_COND: nitrogen condition category (Good, Fair, and Poor)
  • +NITR_COND: nitrogen condition category (Good, Fair, and Poor)
  • BMMI: benthic macroinvertebrate multi-metric index
  • YEAR: probability sample (survey) year
-

To estimate the change between samples (time points) for BMMI (a continuous variable) and NITR_COND (a categorical variable), run

+

To estimate the change between samples (time points) for BMMI (a continuous variable) and NITR_COND (a categorical variable), run

-change_ests <- change_analysis(
-  NRSA_EPA7,
-  siteID = "SITE_ID",
-  vars_cont = "BMMI",
-  vars_cat = "NITR_COND",
-  surveyID = "YEAR",
-  weight = "WEIGHT"
-)
-

The surveyID argument is the variable in the data distinguishing the different samples (YEAR in the previous example).

-

To view the analysis results for NITR_COND (the categorical variable), run

+change_ests <- change_analysis(
+  NRSA_EPA7,
+  siteID = "SITE_ID",
+  vars_cont = "BMMI",
+  vars_cat = "NITR_COND",
+  surveyID = "YEAR",
+  weight = "WEIGHT"
+)
+

The surveyID argument is the variable in the data distinguishing the different samples (YEAR in the previous example).

+

To view the analysis results for NITR_COND (the categorical variable), run

-change_ests$catsum
-#>   Survey_1 Survey_2      Type Subpopulation Indicator     Category  DiffEst.P
-#> 1  2008-09  2013-14 All_Sites     All Sites NITR_COND         Fair -1.8867976
-#> 2  2008-09  2013-14 All_Sites     All Sites NITR_COND         Good -2.9648182
-#> 3  2008-09  2013-14 All_Sites     All Sites NITR_COND Not Assessed -0.2447633
-#> 4  2008-09  2013-14 All_Sites     All Sites NITR_COND         Poor  5.0963791
-#>   StdError.P MarginofError.P  LCB95Pct.P UCB95Pct.P  DiffEst.U StdError.U
-#> 1  3.5366139       6.9316358  -8.8184334  5.0448382 -2919.8839  4990.4714
-#> 2  4.7633367       9.3359683 -12.3007865  6.3711502 -4597.9365  6772.4175
-#> 3  0.2072643       0.4062305  -0.6509938  0.1614672  -363.1305   306.1656
-#> 4  5.8020788      11.3718655  -6.2754864 16.4682446  6598.4548 19777.9218
-#>   MarginofError.U LCB95Pct.U UCB95Pct.U nResp_1 Estimate.P_1 StdError.P_1
-#> 1       9781.1441 -12701.028   6861.260      37   11.2929433    2.7579622
-#> 2      13273.6943 -17871.631   8675.758      22   18.5076549    3.6712147
-#> 3        600.0735   -963.204    236.943       1    0.2447633    0.2072643
-#> 4      38764.0144 -32165.560  45362.469     119   69.9546385    4.3636869
-#>   MarginofError.P_1 LCB95Pct.P_1 UCB95Pct.P_1 Estimate.U_1 StdError.U_1
-#> 1         5.4055067     5.887437   16.6984500   16754.1954    3998.9767
-#> 2         7.1954486    11.312206   25.7031034   27457.9317    5388.5407
-#> 3         0.4062305     0.000000    0.6509938     363.1305     306.1656
-#> 4         8.5526691    61.401969   78.5073076  103784.6070   12266.4461
-#>   MarginofError.U_1 LCB95Pct.U_1 UCB95Pct.U_1 nResp_2 Estimate.P_2 StdError.P_2
-#> 1         7837.8504     8916.345    24592.046      28     9.406146     2.213884
-#> 2        10561.3456    16896.586    38019.277      34    15.542837     3.035055
-#> 3          600.0735        0.000      963.204       0     0.000000     0.000000
-#> 4        24041.7926    79742.814   127826.400     112    75.051018     3.823919
-#>   MarginofError.P_2 LCB95Pct.P_2 UCB95Pct.P_2 Estimate.U_2 StdError.U_2
-#> 1          4.339133     5.067013     13.74528     13834.31     2985.463
-#> 2          5.948599     9.594238     21.49144     22860.00     4102.349
-#> 3          0.000000     0.000000      0.00000         0.00        0.000
-#> 4          7.494743    67.556274     82.54576    110383.06    15514.525
-#>   MarginofError.U_2 LCB95Pct.U_2 UCB95Pct.U_2
-#> 1          5851.400     7982.911     19685.71
-#> 2          8040.456    14819.539     30900.45
-#> 3             0.000        0.000         0.00
-#> 4         30407.911    79975.151    140790.97
-

Estimates are provided for the difference between the two samples and for each of the two individual samples (the _1 and _2 suffixes).

-

To view the analysis results for BMMI (the continuous variable), run

+change_ests$catsum
+#>   Survey_1 Survey_2      Type Subpopulation Indicator     Category  DiffEst.P
+#> 1  2008-09  2013-14 All_Sites     All Sites NITR_COND         Fair -1.8867976
+#> 2  2008-09  2013-14 All_Sites     All Sites NITR_COND         Good -2.9648182
+#> 3  2008-09  2013-14 All_Sites     All Sites NITR_COND Not Assessed -0.2447633
+#> 4  2008-09  2013-14 All_Sites     All Sites NITR_COND         Poor  5.0963791
+#>   StdError.P MarginofError.P  LCB95Pct.P UCB95Pct.P  DiffEst.U StdError.U
+#> 1  3.5366139       6.9316358  -8.8184334  5.0448382 -2919.8839  4990.4714
+#> 2  4.7633367       9.3359683 -12.3007865  6.3711502 -4597.9365  6772.4175
+#> 3  0.2072643       0.4062305  -0.6509938  0.1614672  -363.1305   306.1656
+#> 4  5.8020788      11.3718655  -6.2754864 16.4682446  6598.4548 19777.9218
+#>   MarginofError.U LCB95Pct.U UCB95Pct.U nResp_1 Estimate.P_1 StdError.P_1
+#> 1       9781.1441 -12701.028   6861.260      37   11.2929433    2.7579622
+#> 2      13273.6943 -17871.631   8675.758      22   18.5076549    3.6712147
+#> 3        600.0735   -963.204    236.943       1    0.2447633    0.2072643
+#> 4      38764.0144 -32165.560  45362.469     119   69.9546385    4.3636869
+#>   MarginofError.P_1 LCB95Pct.P_1 UCB95Pct.P_1 Estimate.U_1 StdError.U_1
+#> 1         5.4055067     5.887437   16.6984500   16754.1954    3998.9767
+#> 2         7.1954486    11.312206   25.7031034   27457.9317    5388.5407
+#> 3         0.4062305     0.000000    0.6509938     363.1305     306.1656
+#> 4         8.5526691    61.401969   78.5073076  103784.6070   12266.4461
+#>   MarginofError.U_1 LCB95Pct.U_1 UCB95Pct.U_1 nResp_2 Estimate.P_2 StdError.P_2
+#> 1         7837.8504     8916.345    24592.046      28     9.406146     2.213884
+#> 2        10561.3456    16896.586    38019.277      34    15.542837     3.035055
+#> 3          600.0735        0.000      963.204       0     0.000000     0.000000
+#> 4        24041.7926    79742.814   127826.400     112    75.051018     3.823919
+#>   MarginofError.P_2 LCB95Pct.P_2 UCB95Pct.P_2 Estimate.U_2 StdError.U_2
+#> 1          4.339133     5.067013     13.74528     13834.31     2985.463
+#> 2          5.948599     9.594238     21.49144     22860.00     4102.349
+#> 3          0.000000     0.000000      0.00000         0.00        0.000
+#> 4          7.494743    67.556274     82.54576    110383.06    15514.525
+#>   MarginofError.U_2 LCB95Pct.U_2 UCB95Pct.U_2
+#> 1          5851.400     7982.911     19685.71
+#> 2          8040.456    14819.539     30900.45
+#> 3             0.000        0.000         0.00
+#> 4         30407.911    79975.151    140790.97
+

Estimates are provided for the difference between the two samples and for each of the two individual samples (the _1 and _2 suffixes).

+

To view the analysis results for BMMI (the continuous variable), run

-change_ests$contsum_mean
-#>   Survey_1 Survey_2      Type Subpopulation Indicator  DiffEst StdError
-#> 1  2008-09  2013-14 All_Sites     All Sites      BMMI 3.971559 2.561155
-#>   MarginofError  LCB95Pct UCB95Pct nResp_1 Estimate_1 StdError_1
-#> 1      5.019771 -1.048211  8.99133     179   25.88274   1.468744
-#>   MarginofError_1 LCB95Pct_1 UCB95Pct_1 nResp_2 Estimate_2 StdError_2
-#> 1        2.878686   23.00405   28.76142     174    29.8543   2.098166
-#>   MarginofError_2 LCB95Pct_2 UCB95Pct_2
-#> 1        4.112331   25.74196   33.96663
-

Though we don’t show an example here, the median can be estimated using the test argument in change_analysis().

-

Trend analysis generalizes change analysis to more than two samples (usually time points). Though we omit an example here, the arguments to trend_analysis() are very similar to the arguments for change_analysis(). One difference is that trend_analysis() contains arguments that specify which statistical model to apply to the estimates from each sample.

+change_ests$contsum_mean
+#>   Survey_1 Survey_2      Type Subpopulation Indicator  DiffEst StdError
+#> 1  2008-09  2013-14 All_Sites     All Sites      BMMI 3.971559 2.561155
+#>   MarginofError  LCB95Pct UCB95Pct nResp_1 Estimate_1 StdError_1
+#> 1      5.019771 -1.048211  8.99133     179   25.88274   1.468744
+#>   MarginofError_1 LCB95Pct_1 UCB95Pct_1 nResp_2 Estimate_2 StdError_2
+#> 1        2.878686   23.00405   28.76142     174    29.8543   2.098166
+#>   MarginofError_2 LCB95Pct_2 UCB95Pct_2
+#> 1        4.112331   25.74196   33.96663
+

The median can also be estimated by using the test argument in change_analysis(), as sketched below.
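This is a minimal sketch that reuses the data and variables from above; passing "median" to test is an assumption about the accepted values, so confirm it in the change_analysis() help page.

# request median-based change estimates for the continuous variable
# ("median" as a value of test is assumed -- see ?change_analysis)
change_ests_median <- change_analysis(
  NRSA_EPA7,
  siteID = "SITE_ID",
  vars_cont = "BMMI",
  surveyID = "YEAR",
  weight = "WEIGHT",
  test = "median"
)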

+

Trend analysis generalizes change analysis to more than two samples (usually time points). Though we omit an example here, the arguments to trend_analysis() are very similar to the arguments for change_analysis(). One difference is that trend_analysis() contains arguments that specify which statistical model to apply to the estimates from each sample.

Variance Estimation

-

The default variance estimator in spsurvey is the local neighborhood variance estimator (Stevens and Olsen, 2003). The local neighborhood variance estimator incorporates the spatial locations of design sites into the variance estimation process. Because the local neighborhood variance estimator incorporates spatial locations, the resulting variance estimate tends to be smaller than variance estimates from approaches ignoring spatial locations. The local neighborhood variance estimator requires x-coordinates and y-coordinates of the design sites. When the analysis data is an sf object, the coordinates from the data’s geometry are used. When the analysis data is just a data frame, x-coordinates and y-coordinates (from an appropriate coordinate reference system) must be provided using the xcoord and ycoord arguments, respectively, of the analysis function being used.

-

Several additional variance estimation options are available in spsurvey’s analysis functions through the vartype and jointprob arguments.

+

The default variance estimator in spsurvey is the local neighborhood variance estimator (Stevens and Olsen, 2003). The local neighborhood variance estimator incorporates the spatial locations of design sites into the variance estimation process. Because the local neighborhood variance estimator incorporates spatial locations, the resulting variance estimate tends to be smaller than variance estimates from approaches ignoring spatial locations. The local neighborhood variance estimator requires x-coordinates and y-coordinates of the design sites. When the analysis data is an sf object, the coordinates from the data’s geometry are used. When the analysis data is just a data frame, x-coordinates and y-coordinates (from an appropriate coordinate reference system) must be provided using the xcoord and ycoord arguments, respectively, of the analysis function being used.
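For example, if the analysis data were a plain data frame rather than an sf object, the coordinate columns would be supplied explicitly. The data frame name and coordinate column names in this sketch are hypothetical.

# my_dframe, "XCOORD", and "YCOORD" are placeholders for your own
# data frame and its projected coordinate columns
cont_ests_df <- cont_analysis(
  my_dframe,
  siteID = "SITE_ID",
  vars = "BMMI",
  weight = "WEIGHT",
  xcoord = "XCOORD",
  ycoord = "YCOORD"
)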

+

Several additional variance estimation options are available in spsurvey’s analysis functions through the vartype and jointprob arguments.
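For example, to use a simple-random-sampling variance estimator instead of the local neighborhood estimator (so that no coordinates are needed), a call might look like the following sketch; treat "SRS" as an assumed value of vartype and verify it, along with the jointprob choices, in the analysis function's help page.

# variance estimation that ignores spatial locations
# ("SRS" as a vartype value is an assumption -- see ?cont_analysis)
cont_ests_srs <- cont_analysis(
  NLA_PNW,
  siteID = "SITE_ID",
  vars = "BMMI",
  weight = "WEIGHT",
  vartype = "SRS"
)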

References

-

Van Sickle, J., & Paulsen, S. G. (2008). Assessing the attributable risks, relative risks, and regional extents of aquatic stressors. Journal of the North American Benthological Society, 27(4), 920-931.

-

Stevens Jr, D. L., & Olsen, A. R. (2003). Variance estimation for spatially balanced samples of environmental resources. Environmetrics, 14(6), 593-610.

+

Van Sickle, J., & Paulsen, S. G. (2008). Assessing the attributable risks, relative risks, and regional extents of aquatic stressors. Journal of the North American Benthological Society, 27(4), 920-931.

+

Stevens Jr, D. L., & Olsen, A. R. (2003). Variance estimation for spatially balanced samples of environmental resources. Environmetrics, 14(6), 593-610.

@@ -627,7 +788,7 @@

References

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/articles/index.html b/docs/articles/index.html index 3747d2a..8735f5b 100644 --- a/docs/articles/index.html +++ b/docs/articles/index.html @@ -17,7 +17,7 @@ spsurvey - 5.5.0 + 5.5.1 @@ -87,7 +87,7 @@

All vignettes

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/articles/sampling.html b/docs/articles/sampling.html index 419eef9..77edef8 100644 --- a/docs/articles/sampling.html +++ b/docs/articles/sampling.html @@ -33,7 +33,7 @@ spsurvey - 5.5.0 + 5.5.1 @@ -88,7 +88,8 @@

Introduction

-

The Generalized Random Tessellation Stratified (GRTS) algorithm (Stevens and Olsen, 2004, Olsen et.al., 2012) is a spatially balanced sampling algorithm available in spsurvey. The GRTS algorithm is used to sample from finite populations (point resources – e.g. lakes treated as a whole) and infinite populations (linear resources – e.g. rivers and streams; areal resources – e.g. wetlands and coastlines) and is implemented in spsurvey using the grts() function. The output from grts() contains the design sites and additional information about the sampling design. More specifically, it is a list with five elements:

+

The Generalized Random Tessellation Stratified (GRTS) algorithm (Stevens and Olsen, 2004, Olsen et al., 2012) is a spatially balanced sampling algorithm available in spsurvey. The GRTS algorithm is used to sample from finite populations (point resources – e.g. lakes treated as a whole) and infinite populations (linear resources – e.g. rivers and streams; areal resources – e.g. wetlands and coastlines) and is implemented in spsurvey using the grts() function. The output from grts() contains the design sites and additional information about the sampling design. More specifically, it is a list with five elements:

  1. -sites_legacy: legacy sites included in the base (main) sample (see Legacy sampling)
  2. +sites_legacy: legacy sites included in the base (main) sample (see Legacy sampling)
  3. -sites_base: sites included in the base (main) sample that are not legacy sites
  4. +sites_base: sites included in the base (main) sample that are not legacy sites
  5. -sites_over: replacement sites obtained using reverse hierarchical ordering (see Reverse hierarchical ordering)
  6. +sites_over: replacement sites obtained using reverse hierarchical ordering (see Reverse hierarchical ordering)
  7. -sites_near: replacement sites obtained using nearest neighbor (see Nearest neighbor)
  8. +sites_near: replacement sites obtained using nearest neighbor (see Nearest neighbor)
  9. design: a list detailing the sampling design
-

We use the NE_Lakes sampling frame in spsurvey to introduce spatially balanced sampling via the GRTS algorithm. NE_Lakes is a finite sampling frame and contains lakes (treated as a whole) from the Northeastern United States. There are a few variables in NE_Lakes you will use throughout this vignette:

+

We use the NE_Lakes sampling frame in spsurvey to introduce spatially balanced sampling via the GRTS algorithm. NE_Lakes is a finite sampling frame and contains lakes (treated as a whole) from the Northeastern United States. There are a few variables in NE_Lakes you will use throughout this vignette:

  1. AREA: lake area in hectares
  2. @@ -126,10 +147,11 @@

    Introduction
    -library(spsurvey)
    -set.seed(51)

+library(spsurvey)
+set.seed(51)

Unstratified sampling @@ -137,146 +159,192 @@

Unstratified sampling

Equal inclusion probabilities

-

To select an unstratified GRTS sample where each site in the sampling frame has an equal probability of selection (i.e. inclusion probability), run

+

To select an unstratified GRTS sample where each site in the sampling +frame has an equal probability of selection (i.e. inclusion +probability), run

-eqprob <- grts(NE_Lakes, n_base = 50)
-

The first argument to grts() is the sampling frame, which must be an sf object. The second argument is n_base, which specifies the number of sites in the base (main) sample. The sites_base object in eqprob is an sf object and contains the original columns of NE_Lakes as well as a few additional columns such as a site identifier, latitude and longitude coordinates, inclusion probabilities, and design weights (to be used in analyses after collecting data).

+eqprob <- grts(NE_Lakes, n_base = 50)

+

The first argument to grts() is the sampling frame, which must be an sf object. The second argument is n_base, which specifies the number of sites in the base (main) sample. The sites_base object in eqprob is an sf object and contains the original columns of NE_Lakes as well as a few additional columns such as a site identifier, latitude and longitude coordinates, inclusion probabilities, and design weights (to be used in analyses after collecting data).
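One quick way to see those added columns is to inspect the design-site object directly (the exact column names can vary across spsurvey versions, so check your own output):

# peek at the selected sites and the design columns appended by grts()
names(eqprob$sites_base)
head(eqprob$sites_base)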

To print a summary of site counts, run

-eqprob
-#> Summary of Site Counts: 
-#> 
-#>    total    siteuse  
-#>  total:50   Base:50
-

To visualize the design sites overlain onto the sampling frame, run

+eqprob +#> Summary of Site Counts: +#> +#> total siteuse +#> total:50 Base:50 +

To visualize the design sites overlain onto the sampling frame, +run

-plot(eqprob, NE_Lakes, key.width = lcm(3))
+plot(eqprob, NE_Lakes, key.width = lcm(3))

-

The key.width argument extends the plot’s margin to fit the legend text nicely within the plot. sp_plot() can equivalently be used in place of plot() (sp_plot() is currently maintained for backwards compatibility with previous spsurvey versions).

+

The key.width argument extends the plot’s margin to fit the legend text nicely within the plot. sp_plot() can equivalently be used in place of plot() (sp_plot() is currently maintained for backwards compatibility with previous spsurvey versions).
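For example, the plot above could equivalently be produced with

# equivalent to the plot() call above
sp_plot(eqprob, NE_Lakes, key.width = lcm(3))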

Unequal inclusion probabilities

-

To select an unstratified GRTS sample where each site in the sampling frame has unequal inclusion probabilities according to some categorical variable, run

+

To select an unstratified GRTS sample where each site in the sampling +frame has unequal inclusion probabilities according to some categorical +variable, run

-caty_n <- c(small = 40, large = 10)
-uneqprob <- grts(
-  NE_Lakes,
-  n_base = 50,
-  caty_var = "AREA_CAT",
-  caty_n = caty_n
-)
-uneqprob
-#> Summary of Site Counts: 
-#> 
-#> siteuse by total: 
-#>       Base
-#> total   50
-#> 
-#> siteuse by caty: 
-#>       Base
-#> large   10
-#> small   40
-

caty_var is the unequal inclusion probability variable ("AREA_CAT" in NE_Lakes), and caty_n is a vector whose names are the levels in caty_var and whose values are the expected sample sizes in each category (the sum of these expected sample sizes must equal n_base). In this sample, inclusion probabilities are adjusted so that on average, there are 40 small lakes and 10 large lakes selected.

+caty_n <- c(small = 40, large = 10) +uneqprob <- grts( + NE_Lakes, + n_base = 50, + caty_var = "AREA_CAT", + caty_n = caty_n +) +uneqprob +#> Summary of Site Counts: +#> +#> siteuse by total: +#> Base +#> total 50 +#> +#> siteuse by caty: +#> Base +#> large 10 +#> small 40
+

caty_var is the unequal inclusion probability variable ("AREA_CAT" in NE_Lakes), and caty_n is a vector whose names are the levels in caty_var and whose values are the expected sample sizes in each category (the sum of these expected sample sizes must equal n_base). In this sample, inclusion probabilities are adjusted so that on average, there are 40 small lakes and 10 large lakes selected.

Proportional inclusion probabilities

-

To select an unstratified GRTS sample where each site in the sampling frame has inclusion probability proportional to a positive, continuous variable, run

+

To select an unstratified GRTS sample where each site in the sampling +frame has inclusion probability proportional to a positive, continuous +variable, run

-propprob <- grts(
-  NE_Lakes,
-  n_base = 50,
-  aux_var = "AREA"
-)
-

aux_var is the proportional probability (auxiliary) variable ("AREA" in NE_Lakes). Proportional (to size) inclusion probabilities are useful because they can increase the precision of estimates when the response variable is positively correlated with the proportional probability variable.

+propprob <- grts( + NE_Lakes, + n_base = 50, + aux_var = "AREA" +)
+

aux_var is the proportional probability (auxiliary) +variable ("AREA" in NE_Lakes). Proportional +(to size) inclusion probabilities are useful because they can increase +the precision of estimates when the response variable is positively +correlated with the proportional probability variable.

Stratified sampling

-

Stratified sampling designs partition the sampling frame into distinct groups called strata and sites are selected within each stratum independently of other strata. Stratified sampling is useful from a practical perspective because it allows for stratum-specific sample sizes and implementation practices (e.g. each stratum may have different sampling protocols). Stratified sampling is useful from a statistical perspective because estimates from stratified samples tend to be more precise than estimates from unstratified samples.

+

Stratified sampling designs partition the sampling frame into +distinct groups called strata and sites are selected within each stratum +independently of other strata. Stratified sampling is useful from a +practical perspective because it allows for stratum-specific sample +sizes and implementation practices (e.g. each stratum may have different +sampling protocols). Stratified sampling is useful from a statistical +perspective because estimates from stratified samples tend to be more +precise than estimates from unstratified samples.

Equal inclusion probabilities

-

To select a sample stratified by elevation category with equal inclusion probabilities in each stratum, run

+

To select a sample stratified by elevation category with equal +inclusion probabilities in each stratum, run

-strata_n <- c(low = 25, high = 15)
-strat_eqprob <- grts(NE_Lakes, n_base = strata_n, stratum_var = "ELEV_CAT")
-strat_eqprob
-#> Summary of Site Counts: 
-#> 
-#> siteuse by total: 
-#>       Base
-#> total   40
-#> 
-#> siteuse by stratum: 
-#>      Base
-#> high   15
-#> low    25
-

strata_n is a named vector whose names represent the strata and whose values represent strata-specific sample sizes, and stratum_var is the stratification variable (ELEV_CAT in NE_Lakes).

-

To visualize the design sites overlain onto the sampling frame (separately for each stratum), run

+strata_n <- c(low = 25, high = 15) +strat_eqprob <- grts(NE_Lakes, n_base = strata_n, stratum_var = "ELEV_CAT") +strat_eqprob +#> Summary of Site Counts: +#> +#> siteuse by total: +#> Base +#> total 40 +#> +#> siteuse by stratum: +#> Base +#> high 15 +#> low 25
+

strata_n is a named vector whose names represent the +strata and whose values represent strata-specific sample sizes, and +stratum_var is the stratification variable +(ELEV_CAT in NE_Lakes).

+

To visualize the design sites overlain onto the sampling frame +(separately for each stratum), run

-plot(
-  strat_eqprob,
-  formula = siteuse ~ ELEV_CAT,
-  NE_Lakes,
-  key.width = lcm(3)
-)
+plot( + strat_eqprob, + formula = siteuse ~ ELEV_CAT, + NE_Lakes, + key.width = lcm(3) +)

Unequal inclusion probabilities

-

To select a sample stratified by elevation category with unequal inclusion probabilities for each area category, run

+

To select a sample stratified by elevation category with unequal +inclusion probabilities for each area category, run

-caty_n <- list(
-  low = c(small = 20, large = 5),
-  high = c(small = 10, large = 5)
-)
-strat_uneqprob <- grts(
-  NE_Lakes,
-  n_base = strata_n,
-  stratum_var = "ELEV_CAT",
-  caty_var = "AREA_CAT",
-  caty_n = caty_n
-)
-strat_uneqprob
-#> Summary of Site Counts: 
-#> 
-#> siteuse by total: 
-#>       Base
-#> total   40
-#> 
-#> siteuse by stratum: 
-#>      Base
-#> high   15
-#> low    25
-#> 
-#> siteuse by caty: 
-#>       Base
-#> large    9
-#> small   31
-#> 
-#> siteuse by stratum:caty: 
-#>            Base
-#> high:large    3
-#> low:large     6
-#> high:small   12
-#> low:small    19
-

caty_n is now a list: the first element contains the expected sample sizes for each area category in the low elevation stratum, and the second element contains the expected sample sizes for each area category in the high elevation stratum.

+caty_n <- list( + low = c(small = 20, large = 5), + high = c(small = 10, large = 5) +) +strat_uneqprob <- grts( + NE_Lakes, + n_base = strata_n, + stratum_var = "ELEV_CAT", + caty_var = "AREA_CAT", + caty_n = caty_n +) +strat_uneqprob +#> Summary of Site Counts: +#> +#> siteuse by total: +#> Base +#> total 40 +#> +#> siteuse by stratum: +#> Base +#> high 15 +#> low 25 +#> +#> siteuse by caty: +#> Base +#> large 9 +#> small 31 +#> +#> siteuse by stratum:caty: +#> Base +#> high:large 3 +#> low:large 6 +#> high:small 12 +#> low:small 19
+

caty_n is now a list: the first element contains the +expected sample sizes for each area category in the low elevation +stratum, and the second element contains the expected sample sizes for +each area category in the high elevation stratum.

Proportional inclusion probabilities

-

To select a sample stratified by elevation category with probabilities proportional to lake area, run

+

To select a sample stratified by elevation category with +probabilities proportional to lake area, run

-strat_propprob <- grts(
-  NE_Lakes,
-  n_base = strata_n,
-  stratum_var = "ELEV_CAT",
-  aux_var = "AREA"
-)
+strat_propprob <- grts( + NE_Lakes, + n_base = strata_n, + stratum_var = "ELEV_CAT", + aux_var = "AREA" +)
@@ -285,211 +353,320 @@

Additional sampling options

Legacy sampling

-

Legacy (historical) sites are sites that were selected from a previous sampling design that incorporated randomness into site selection, are part of the sampling frame for the current sampling design, and should always be selected in the current sample. The NE_Lakes_Legacy data contains some legacy sites. To accommodate these legacy sites while sampling, use the legacy_sites argument:

+

Legacy (historical) sites are sites that were selected from a previous sampling design that incorporated randomness into site selection, are part of the sampling frame for the current sampling design, and should always be selected in the current sample. The NE_Lakes_Legacy data contains some legacy sites. To accommodate these legacy sites while sampling, use the legacy_sites argument:

-legacy <- grts(NE_Lakes, n_base = 50, legacy_sites = NE_Lakes_Legacy)
-legacy
-#> Summary of Site Counts: 
-#> 
-#>    total      siteuse  
-#>  total:50   Legacy: 5  
-#>             Base  :45
+legacy <- grts(NE_Lakes, n_base = 50, legacy_sites = NE_Lakes_Legacy) +legacy +#> Summary of Site Counts: +#> +#> total siteuse +#> total:50 Legacy: 5 +#> Base :45

To visualize the legacy and base sites together, run

-plot(legacy, key.width = lcm(3))
+plot(legacy, key.width = lcm(3))

-

These points can be overlain onto the sampling frame by including NE_Lakes in the plot() command.

-

Legacy sites are included in the base (main) sample, so the value for n_base should be equal to the number of legacy sites plus the number of non-legacy sites desired in the sample. If your sampling design uses stratification, unequal inclusion probabilities, or proportional inclusion probabilities, you need to name the column in legacy_sites that represents these values. By default, grts() assumes that the respective columns in sframe and legacy_sites share the same name – if this is not the case, use the legacy_stratum_var, legacy_caty_var or legacy_aux_var arguments. If your population is finite, you may alternatively accommodate legacy sites by including a variable in your sampling frame that indicates whether each site is a legacy site or not and then use the legacy_var argument in grts(). When using legacy_var, you do not need to use legacy_stratum_var, legacy_caty_var or legacy_aux_var.

-

Another legacy site example is provided in Infinite Population Sampling.

+

These points can be overlain onto the sampling frame by including +NE_Lakes in the plot() command.

+

Legacy sites are included in the base (main) sample, so the value for n_base should be equal to the number of legacy sites plus the number of non-legacy sites desired in the sample. If your sampling design uses stratification, unequal inclusion probabilities, or proportional inclusion probabilities, you need to name the column in legacy_sites that represents these values. By default, grts() assumes that the respective columns in sframe and legacy_sites share the same name – if this is not the case, use the legacy_stratum_var, legacy_caty_var or legacy_aux_var arguments. If your population is finite, you may alternatively accommodate legacy sites by including a variable in your sampling frame that indicates whether each site is a legacy site or not and then use the legacy_var argument in grts(). When using legacy_var, you do not need to use legacy_stratum_var, legacy_caty_var or legacy_aux_var.
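A minimal sketch of the legacy_var approach for a finite sampling frame follows; the indicator column name LEGACY is hypothetical, and the exact coding grts() expects for that column should be confirmed in its help page.

# assumes NE_Lakes already contains a hypothetical column named LEGACY
# that flags which lakes are legacy sites
legacy_var_sample <- grts(NE_Lakes, n_base = 50, legacy_var = "LEGACY")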

+

Another legacy site example is provided in Infinite Population +Sampling.

Minimum distance sampling

-

Though the GRTS algorithm selects spatially balanced samples, the algorithm can select sites that are closer together than you may desire. To enforce a minimum distance between sites, run

+

Though the GRTS algorithm selects spatially balanced samples, the +algorithm can select sites that are closer together than you may desire. +To enforce a minimum distance between sites, run

-mindis <- grts(NE_Lakes, n_base = 50, mindis = 1600)
-

Here we have specified that sites be no closer together than 1600 meters (meters are the units of the sf object). In some situations, the grts() function will fail to enforce the minimum distance requirement for all sites. When this occurs, the function will enforce the requirement for as many sites as possible and then return a warning.

-

When stratifying, mindis will apply to all strata if it is a single value. It is possible to set stratum-specific minimum distance requirements by storing them in a named list (the names must match the strata):

+mindis <- grts(NE_Lakes, n_base = 50, mindis = 1600)
+

Here we have specified that sites be no closer together than 1600 +meters (meters are the units of the sf object). In some +situations, the grts() function will fail to enforce the +minimum distance requirement for all sites. When this occurs, the +function will enforce the requirement for as many sites as possible and +then return a warning.

+

When stratifying, mindis will apply to all strata if it +is a single value. It is possible to set stratum-specific minimum +distance requirements by storing them in a named list (the names must +match the strata):

-mindis_list <- list(low = 1400, high = 1000)
-strat_mindis <- grts(
-  NE_Lakes,
-  strata_n,
-  stratum_var = "ELEV_CAT",
-  mindis = mindis_list
-)
+mindis_list <- list(low = 1400, high = 1000) +strat_mindis <- grts( + NE_Lakes, + strata_n, + stratum_var = "ELEV_CAT", + mindis = mindis_list +)

Replacement sampling

-

Sometimes data cannot be collected at a site. This may occur for a variety of reasons, some of which include landowner denial or inaccessible sites. The grts() function has two options for selecting replacement sites, which are sites available to replace sites in the base (main) sample for which data cannot be collected.

+

Sometimes data cannot be collected at a site. This may occur for a +variety of reasons, some of which include landowner denial or +inaccessible sites. The grts() function has two options for +selecting replacement sites, which are sites available to replace sites +in the base (main) sample for which data cannot be collected.

Reverse hierarchical ordering

-

The first replacement site option is reverse hierarchical ordering (Stevens and Olsen, 2004). To select a base sample with reverse hierarchically ordered replacement sites, run

+

The first replacement site option is reverse hierarchical ordering +(Stevens and Olsen, 2004). To select a base sample with reverse +hierarchically ordered replacement sites, run

-rho_replace <- grts(NE_Lakes, n_base = 50, n_over = 25)
-rho_replace
-#> Summary of Site Counts: 
-#> 
-#>    total    siteuse  
-#>  total:75   Base:50  
-#>             Over:25
-

n_base indicates the desired sample size, and n_over indicates the number of replacement sites. Sites are first selected using the GRTS algorithm for a sample size of n_base + n_over. They are then determined as base sites or replacement sites in a way that preserves as much spatial balance as possible. The spatial balance of the base sites degrades as n_over increases, however, so it is important to choose a realistic value for n_over.

-

To visualize the base sites and reverse hierarchically ordered replacement sites, run

+rho_replace <- grts(NE_Lakes, n_base = 50, n_over = 25) +rho_replace +#> Summary of Site Counts: +#> +#> total siteuse +#> total:75 Base:50 +#> Over:25
+

n_base indicates the desired sample size, and +n_over indicates the number of replacement sites. Sites are +first selected using the GRTS algorithm for a sample size of +n_base + n_over. They are then determined as base sites or +replacement sites in a way that preserves as much spatial balance as +possible. The spatial balance of the base sites degrades as +n_over increases, however, so it is important to choose a +realistic value for n_over.

+

To visualize the base sites and reverse hierarchically ordered +replacement sites, run

-plot(rho_replace, key.width = lcm(3))
+plot(rho_replace, key.width = lcm(3))

-

When stratifying, n_over will apply to all strata if it is a single value. It is possible to set stratum-specific reverse hierarchical ordering requirements by storing them in a named list (the names must match the strata):

+

When stratifying, n_over will apply to all strata if it +is a single value. It is possible to set stratum-specific reverse +hierarchical ordering requirements by storing them in a named list (the +names must match the strata):

-over_list <- list(low = 2, high = 5)
-strat_rho_replace <- grts(
-  NE_Lakes,
-  strata_n,
-  stratum_var = "ELEV_CAT",
-  n_over = over_list
-)
-strat_rho_replace
-#> Summary of Site Counts: 
-#> 
-#> siteuse by total: 
-#>       Base Over
-#> total   40    7
-#> 
-#> siteuse by stratum: 
-#>      Base Over
-#> high   15    5
-#> low    25    2
+over_list <- list(low = 2, high = 5) +strat_rho_replace <- grts( + NE_Lakes, + strata_n, + stratum_var = "ELEV_CAT", + n_over = over_list +) +strat_rho_replace +#> Summary of Site Counts: +#> +#> siteuse by total: +#> Base Over +#> total 40 7 +#> +#> siteuse by stratum: +#> Base Over +#> high 15 5 +#> low 25 2

Nearest neighbor

-

The second replacement site option is nearest neighbor. To select a base sample with nearest neighbor replacement sites, run

+

The second replacement site option is nearest neighbor. To select a +base sample with nearest neighbor replacement sites, run

-nn_replace <- grts(NE_Lakes, n_base = 50, n_near = 1)
-nn_replace
-#> Summary of Site Counts: 
-#> 
-#>    total     siteuse  
-#>  total:100   Base:50  
-#>              Near:50
-

n_base indicates the desired sample size, and n_near indicates the number of replacement sites for each base site. For n_near = 1, each site in the base sample has a replacement site associated with it – this replacement site is the closest site (measured by Euclidean distance) to the base site (within a stratum).

-

When stratifying, n_near will apply to all strata if it is a single value. It is possible to set stratum-specific nearest neighbor requirements by storing them in a named list (the names must match the strata):

+nn_replace <- grts(NE_Lakes, n_base = 50, n_near = 1) +nn_replace +#> Summary of Site Counts: +#> +#> total siteuse +#> total:100 Base:50 +#> Near:50
+

n_base indicates the desired sample size, and +n_near indicates the number of replacement sites for each +base site. For n_near = 1, each site in the base sample has +a replacement site associated with it – this replacement site is the +closest site (measured by Euclidean distance) to the base site (within a +stratum).

+

When stratifying, n_near will apply to all strata if it +is a single value. It is possible to set stratum-specific nearest +neighbor requirements by storing them in a named list (the names must +match the strata):

-near_list <- list(low = 1, high = 2)
-strat_nn_replace <- grts(
-  NE_Lakes,
-  strata_n,
-  stratum_var = "ELEV_CAT",
-  n_near = near_list
-)
-strat_nn_replace
-#> Summary of Site Counts: 
-#> 
-#> siteuse by total: 
-#>       Base Near
-#> total   40   55
-#> 
-#> siteuse by stratum: 
-#>      Base Near
-#> high   15   30
-#> low    25   25
+near_list <- list(low = 1, high = 2) +strat_nn_replace <- grts( + NE_Lakes, + strata_n, + stratum_var = "ELEV_CAT", + n_near = near_list +) +strat_nn_replace +#> Summary of Site Counts: +#> +#> siteuse by total: +#> Base Near +#> total 40 55 +#> +#> siteuse by stratum: +#> Base Near +#> high 15 30 +#> low 25 25

Independent Random Sampling

-

If you want to select samples that are not spatially balanced, use spsurvey’s irs() function (IRS is for Independent Random Sampling). The function arguments for the irs() function are the same as the function arguments for the grts() function. This means that the flexible sampling design options available for the GRTS algorithm are also available for the IRS algorithm. To select an unstratified IRS sample where each site in the sampling frame has an equal inclusion probability, run

+

If you want to select samples that are not spatially balanced, use +spsurvey’s irs() function (IRS is for Independent Random +Sampling). The function arguments for the irs() function +are the same as the function arguments for the grts() +function. This means that the flexible sampling design options available +for the GRTS algorithm are also available for the IRS algorithm. To +select an unstratified IRS sample where each site in the sampling frame +has an equal inclusion probability, run

-eqprob_irs <- irs(NE_Lakes, n_base = 50)
-eqprob_irs
-#> Summary of Site Counts: 
-#> 
-#>    total    siteuse  
-#>  total:50   Base:50
+eqprob_irs <- irs(NE_Lakes, n_base = 50) +eqprob_irs +#> Summary of Site Counts: +#> +#> total siteuse +#> total:50 Base:50

To visualize the design sites, run

-plot(eqprob_irs, NE_Lakes, key.width = lcm(3))
+plot(eqprob_irs, NE_Lakes, key.width = lcm(3))

-

Notice how these IRS design sites appear less spread out than the design sites from the unstratified GRTS sample with equal inclusion probabilities.

-

To select an IRS sample stratified by elevation category with equal inclusion probabilities in each stratum, run

+

Notice how these IRS design sites appear less spread out than the +design sites from the unstratified GRTS sample with equal inclusion +probabilities.

+

To select an IRS sample stratified by elevation category with equal +inclusion probabilities in each stratum, run

-strata_n <- c(low = 25, high = 15)
-strat_eqprob_irs <- irs(NE_Lakes, n_base = strata_n, stratum_var = "ELEV_CAT")
-strat_eqprob_irs
-#> Summary of Site Counts: 
-#> 
-#> siteuse by total: 
-#>       Base
-#> total   40
-#> 
-#> siteuse by stratum: 
-#>      Base
-#> high   15
-#> low    25
+strata_n <- c(low = 25, high = 15) +strat_eqprob_irs <- irs(NE_Lakes, n_base = strata_n, stratum_var = "ELEV_CAT") +strat_eqprob_irs +#> Summary of Site Counts: +#> +#> siteuse by total: +#> Base +#> total 40 +#> +#> siteuse by stratum: +#> Base +#> high 15 +#> low 25

Measuring spatial balance

-

So far we have talked generally about spatial balance but not about how to measure it. Stevens and Olsen (2004) proposed measuring spatial balance using Voronoi polygons. A design site’s Voronoi polygon contains all sites in the sampling frame closer to this design site than any other design site. To measure spatial balance, these Voronoi polygons are combined with the inclusion probabilities of all sites in the sampling frame and a loss metric.

-

Several spatial balance metrics are available in spsurvey. The default is Pielou’s evenness index. Pielou’s evenness index ranges from 0 (perfect spatial balance) to 1 (a complete lack of spatial balance). Larger values of the Pielou’s evenness index indicate worse spatial balance. To measure the spatial balance of the unstratified GRTS and IRS samples with equal inclusion probabilities, run

+

So far we have talked generally about spatial balance but not about +how to measure it. Stevens and Olsen (2004) proposed measuring spatial +balance using Voronoi polygons. A design site’s Voronoi polygon contains +all sites in the sampling frame closer to this design site than any +other design site. To measure spatial balance, these Voronoi polygons +are combined with the inclusion probabilities of all sites in the +sampling frame and a loss metric.

+

Several spatial balance metrics are available in spsurvey. The +default is Pielou’s evenness index. Pielou’s evenness index ranges from +0 (perfect spatial balance) to 1 (a complete lack of spatial balance). +Larger values of the Pielou’s evenness index indicate worse spatial +balance. To measure the spatial balance of the unstratified GRTS and IRS +samples with equal inclusion probabilities, run

-sp_balance(eqprob$sites_base, NE_Lakes) # grts
-#>   stratum metric      value
-#> 1    None pielou 0.02585831
-sp_balance(eqprob_irs$sites_base, NE_Lakes) # irs
-#>   stratum metric      value
-#> 1    None pielou 0.03823327
-

The GRTS design sites are more spatially balanced than the IRS design sites.

-

In a stratified sample, spatial balance is calculated separately for each stratum:

+sp_balance(eqprob$sites_base, NE_Lakes) # grts +#> stratum metric value +#> 1 None pielou 0.02585831 +sp_balance(eqprob_irs$sites_base, NE_Lakes) # irs +#> stratum metric value +#> 1 None pielou 0.03823327
+

The GRTS design sites are more spatially balanced than the IRS design +sites.

+

In a stratified sample, spatial balance is calculated separately for +each stratum:

-sp_balance(strat_eqprob$sites_base, NE_Lakes, stratum_var = "ELEV_CAT") # grts
-#>   stratum metric      value
-#> 1     low pielou 0.03680620
-#> 2    high pielou 0.04543609
-sp_balance(strat_eqprob_irs$sites_base, NE_Lakes, stratum_var = "ELEV_CAT") # irs
-#>   stratum metric     value
-#> 1     low pielou 0.0415444
-#> 2    high pielou 0.1171086
-

The GRTS design sites are more spatially balanced than the IRS design sites in both strata.

+sp_balance(strat_eqprob$sites_base, NE_Lakes, stratum_var = "ELEV_CAT") # grts +#> stratum metric value +#> 1 low pielou 0.03680620 +#> 2 high pielou 0.04543609 +sp_balance(strat_eqprob_irs$sites_base, NE_Lakes, stratum_var = "ELEV_CAT") # irs +#> stratum metric value +#> 1 low pielou 0.0415444 +#> 2 high pielou 0.1171086 +

The GRTS design sites are more spatially balanced than the IRS design +sites in both strata.

Infinite population sampling

-

Though this vignette used a finite population (NE_Lakes) to illustrate the sampling options available in spsurvey, the same commands are used to sample from infinite populations. The Illinois_River sf object in spsurvey is a linear resource and the Lake_Ontario sf object in spsurvey is an areal resource – both are examples of infinite populations. To select a GRTS sample with equal inclusion probabilities from Illinois_River, run

+

Though this vignette used a finite population (NE_Lakes) +to illustrate the sampling options available in spsurvey, the same +commands are used to sample from infinite populations. The +Illinois_River sf object in spsurvey is a +linear resource and the Lake_Ontario sf object +in spsurvey is an areal resource – both are examples of infinite +populations. To select a GRTS sample with equal inclusion probabilities +from Illinois_River, run

-eqprob <- grts(Illinois_River, n_base = 50)
-

To visualize the design sites overlain onto the sampling frame, run

+eqprob <- grts(Illinois_River, n_base = 50)
+

To visualize the design sites overlain onto the sampling frame, +run

-plot(eqprob, Illinois_River, key.width = lcm(3))
+plot(eqprob, Illinois_River, key.width = lcm(3))

To accommodate the Illinois River legacy sites, run:

-legacy <- grts(Illinois_River, n_base = 50, legacy_sites = Illinois_River_Legacy)
-

To select a GRTS sample with equal inclusion probabilities from Lake_Ontario, run

+legacy <- grts(Illinois_River, n_base = 50, legacy_sites = Illinois_River_Legacy) +

To select a GRTS sample with equal inclusion probabilities from +Lake_Ontario, run

-eqprob <- grts(Lake_Ontario, n_base = 50)
-

To visualize the design sites (with closed circles) overlain onto the sampling frame, run

+eqprob <- grts(Lake_Ontario, n_base = 50) +

To visualize the design sites (with closed circles) overlain onto the +sampling frame, run

-plot(eqprob, Lake_Ontario, pch = 19, key.width = lcm(3))
+plot(eqprob, Lake_Ontario, pch = 19, key.width = lcm(3))

Binding design sites

-

To bind sites_legacy, sites_base, sites_over, sites_near (four separate sf objects) into a single sf object, use sp_rbind(). For example, to combine the base and reverse hierarchically ordered replacement sites from rho_replace into a single sf object, run

+

To bind sites_legacy, sites_base, +sites_over, sites_near (four separate +sf objects) into a single sf object, use +sp_rbind(). For example, to combine the base and reverse +hierarchically ordered replacement sites from rho_replace +into a single sf object, run

-combined <- sp_rbind(rho_replace)
-

Then it is straightforward to write out a single sf object using a function like sf::write_sf(). For example, to save the combined sf object as a shapefile named "file_name.shp" at the location on your machine called "file_path", run

+combined <- sp_rbind(rho_replace)
+

Then it is straightforward to write out a single sf +object using a function like sf::write_sf(). For example, +to save the combined sf object as a shapefile named +"file_name.shp" at the location on your machine called +"file_path", run

-write_sf(combined, "file_path/file_name.shp")
+write_sf(combined, "file_path/file_name.shp")

References

-

Olsen, A. R., T. M. Kincaid and Q. Payton (2012). Spatially balanced survey designs for natural resources. Design and Analysis of Long-Term Ecological Monitoring Studies. R. A. Gitzen, J. J. Millspaugh, A. B. Cooper and D. S. Licht. Cambridge, UK, Cambridge University Press: 126-150.

-

Stevens Jr, D. L. and Olsen, A. R. (2004). Spatially balanced sampling of natural resources. Journal of the American Statistical Association, 99(465):262-278.

+

Olsen, A. R., T. M. Kincaid and Q. Payton (2012). Spatially balanced +survey designs for natural resources. Design and Analysis of Long-Term +Ecological Monitoring Studies. R. A. Gitzen, J. J. Millspaugh, A. B. +Cooper and D. S. Licht. Cambridge, UK, Cambridge University Press: +126-150.

+

Stevens Jr, D. L. and Olsen, A. R. (2004). Spatially balanced +sampling of natural resources. Journal of the American Statistical +Association, 99(465):262-278.

@@ -510,7 +687,7 @@

References

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/articles/start-here.html b/docs/articles/start-here.html index a9eeeb5..9ed86cc 100644 --- a/docs/articles/start-here.html +++ b/docs/articles/start-here.html @@ -33,7 +33,7 @@ spsurvey - 5.5.0 + 5.5.1 @@ -88,7 +88,8 @@
+

to install the package. You only need to run this code once per +version of R.

+

After the spsurvey package is installed, load it into R each new R +session by running

+library(spsurvey)

Citation information

-

If you used spsurvey in your work, please cite it. You can view the most recent citation by running

+

If you used spsurvey in your work, please cite it. You can view the +most recent citation by running

-citation("spsurvey")
-#> 
-#> To cite spsurvey in publications use:
-#> 
-#>   Michael Dumelle, Tom Kincaid, Anthony R. Olsen, Marc Weber (2023).
-#>   spsurvey: Spatial Sampling Design and Analysis in R. Journal of
-#>   Statistical Software, 105(3), 1-29. doi:10.18637/jss.v105.i03
-#> 
-#> A BibTeX entry for LaTeX users is
-#> 
-#>   @Article{,
-#>     title = {{spsurvey}: Spatial Sampling Design and Analysis in {R}},
-#>     author = {Michael Dumelle and Tom Kincaid and Anthony R. Olsen and Marc Weber},
-#>     journal = {Journal of Statistical Software},
-#>     year = {2023},
-#>     volume = {105},
-#>     number = {3},
-#>     pages = {1--29},
-#>     doi = {10.18637/jss.v105.i03},
-#>   }
+citation("spsurvey") +#> +#> To cite spsurvey in publications use: +#> +#> Michael Dumelle, Tom Kincaid, Anthony R. Olsen, Marc Weber (2023). +#> spsurvey: Spatial Sampling Design and Analysis in R. Journal of +#> Statistical Software, 105(3), 1-29. doi:10.18637/jss.v105.i03 +#> +#> A BibTeX entry for LaTeX users is +#> +#> @Article{, +#> title = {{spsurvey}: Spatial Sampling Design and Analysis in {R}}, +#> author = {Michael Dumelle and Tom Kincaid and Anthony R. Olsen and Marc Weber}, +#> journal = {Journal of Statistical Software}, +#> year = {2023}, +#> volume = {105}, +#> number = {3}, +#> pages = {1--29}, +#> doi = {10.18637/jss.v105.i03}, +#> }

spsurvey terminology

-

spsurvey implements a design-based approach to statistical inference, with a focus on spatial data. There are a few terms helpful to define before we move forward with spsurvey, as these terms will be used throughout the vignettes and documentation:

+

spsurvey implements a design-based approach to statistical inference, with a focus on spatial data. There are a few terms helpful to define before we move forward with spsurvey, as these terms will be used throughout the vignettes and documentation:

  • Survey design: All aspects of a survey from establishment of a need for data to the production of final results.
  • Sampling frame: The set of all sites (i.e. units) from which a sample is selected.
  • Sampling design: The process by which sites are selected from the sampling frame.
  • Design sites: The set of sites selected from the sampling frame according to the sampling design. Sometimes the term “sample” is used to refer to a collection of design sites (e.g. the term “GRTS sample” more formally means “the collection of design sites selected using the GRTS algorithm according to the sampling design”).
  • Analysis data: Data collected at the design sites. This includes information from the survey design such as design weights, stratification variables, subpopulation variables, etc.
  • Analysis results: The results from an appropriate statistical analysis of the analysis data.
@@ -153,95 +170,174 @@

Vignettes in spsurvey

There are three additional vignettes in spsurvey:

  1. Exploratory Data Analysis: Summarizing and Visualizing Sampling Frames, Design Sites, and Analysis Data
  2. Spatially Balanced Sampling
     • Use the grts() function to implement the Generalized Random Tessellation Stratified (GRTS) algorithm (Stevens and Olsen, 2004) to select spatially balanced samples
     • To view this vignette, run vignette("sampling", "spsurvey").
  3. Analyzing Data
-

These vignettes cover some of the core functions (and arguments within those functions) in spsurvey. To learn more about features of spsurvey that are not covered in these vignettes, we encourage you to read spsurvey’s documentation available for download here or viewable interactively on our website here. Help files for a particular function are viewable by running ?function_name after loading spsurvey. For example, to learn more about the grts() function, run ?grts.

+

These vignettes cover some of the core functions (and arguments within those functions) in spsurvey. To learn more about features of spsurvey that are not covered in these vignettes, we encourage you to read spsurvey’s documentation available for download here or viewable interactively on our website here. Help files for a particular function are viewable by running ?function_name after loading spsurvey. For example, to learn more about the grts() function, run ?grts.
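A small aside (not from the vignette): base R can also list every help topic that spsurvey ships, which is handy when you do not remember a function name:

help(package = "spsurvey")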

Installing a previous version of spsurvey

-

The version 5.0.0 update to spsurvey implemented many significant changes to existing functions. As a result, some of your old code may not run properly while using version 5.0.0. Though we recommend adapting your code to work with the version 5.0.0, you may also install a previous version of spsurvey. For information regarding the installation of previous version of R packages, please see the RStudio support page here. Additionally, old versions of spsurvey are also available for download in the release tags section of our GitHub repository here.

+

The version 5.0.0 update to spsurvey implemented many significant changes to existing functions. As a result, some of your old code may not run properly under version 5.0.0. Though we recommend adapting your code to work with version 5.0.0, you may also install a previous version of spsurvey. For information regarding the installation of previous versions of R packages, please see the RStudio support page here. Additionally, old versions of spsurvey are also available for download in the release tags section of our GitHub repository here.

sf objects

-

The sampling functions in spsurvey (grts() and irs()) require that your sampling frame is an sf object. An sf object (shorthand for a “simple features” object) is an R object with a unique structure used to conveniently store spatial data. sf objects are constructed using the sf package (Pebesma, 2018). The sf package is loaded and installed alongside the spsurvey package, so you do not need to run install.packages("sf") or library(sf) to access the sf package if spsurvey is already installed and loaded. For more on the sf package, see here.

-

Next we discuss a few ways to construct sf objects in R. The first is to read a shapefile directly into R using sf::read_sf(). The second is to use the sf::st_sf() function or the sf::st_as_sf() function to combine an appropriate R object (most commonly a data frame) and an appropriate geometry object into an sf object. To illustrate one approach for turning a data frame into an sf object, we start with NE_Lakes_df, a data frame in spsurvey that contains variables and geographic coordinates (latitude and longitude coordinates) for lakes in the Northeastern United States. To turn NE_Lakes_df into NE_Lakes_geo, an sf object with geographic coordinates, run

+

The sampling functions in spsurvey (grts() and irs()) require that your sampling frame is an sf object. An sf object (shorthand for a “simple features” object) is an R object with a unique structure used to conveniently store spatial data. sf objects are constructed using the sf package (Pebesma, 2018). The sf package is loaded and installed alongside the spsurvey package, so you do not need to run install.packages("sf") or library(sf) to access the sf package if spsurvey is already installed and loaded. For more on the sf package, see here.

+

Next we discuss a few ways to construct sf objects in R. The first is to read a shapefile directly into R using sf::read_sf(). The second is to use the sf::st_sf() function or the sf::st_as_sf() function to combine an appropriate R object (most commonly a data frame) and an appropriate geometry object into an sf object. To illustrate one approach for turning a data frame into an sf object, we start with NE_Lakes_df, a data frame in spsurvey that contains variables and geographic coordinates (latitude and longitude coordinates) for lakes in the Northeastern United States. To turn NE_Lakes_df into NE_Lakes_geo, an sf object with geographic coordinates, run

-NE_Lakes_geo <- st_as_sf(NE_Lakes_df, coords = c("XCOORD", "YCOORD"), crs = 4326)
-NE_Lakes_geo
-#> Simple feature collection with 195 features and 4 fields
-#> Geometry type: POINT
-#> Dimension:     XY
-#> Bounding box:  xmin: -73.64778 ymin: 41.07065 xmax: -69.96715 ymax: 42.73616
-#> Geodetic CRS:  WGS 84
-#> First 10 features:
-#>         AREA AREA_CAT   ELEV ELEV_CAT                   geometry
-#> 1  10.648825    large 264.69     high POINT (-72.08896 42.55508)
-#> 2   2.504606    small 557.63     high POINT (-73.18199 42.36727)
-#> 3   3.979199    small  28.79      low POINT (-71.14074 42.15596)
-#> 4   1.645657    small 212.60     high   POINT (-73.06726 41.783)
-#> 5   7.489052    small 239.67     high  POINT (-72.2602 42.36255)
-#> 6  86.533725    large 195.37     high POINT (-71.74634 41.87624)
-#> 7   1.926996    small 158.96     high POINT (-73.48408 41.34238)
-#> 8   6.514217    small  29.26      low POINT (-73.25487 41.20551)
-#> 9   3.100221    small 204.62     high POINT (-72.20897 42.12512)
-#> 10  1.868094    small  78.77      low POINT (-72.70233 42.18012)
-

The coords argument to sf::st_as_sf specifies the columns in NE_Lakes_df that are the x-coordinates and y-coordinates. The crs argument specifies the coordinate reference system, which we discuss in more detail next.

+NE_Lakes_geo <- st_as_sf(NE_Lakes_df, coords = c("XCOORD", "YCOORD"), crs = 4326)
+NE_Lakes_geo
+#> Simple feature collection with 195 features and 4 fields
+#> Geometry type: POINT
+#> Dimension:     XY
+#> Bounding box:  xmin: -73.64778 ymin: 41.07065 xmax: -69.96715 ymax: 42.73616
+#> Geodetic CRS:  WGS 84
+#> First 10 features:
+#>         AREA AREA_CAT   ELEV ELEV_CAT                   geometry
+#> 1  10.648825    large 264.69     high POINT (-72.08896 42.55508)
+#> 2   2.504606    small 557.63     high POINT (-73.18199 42.36727)
+#> 3   3.979199    small  28.79      low POINT (-71.14074 42.15596)
+#> 4   1.645657    small 212.60     high   POINT (-73.06726 41.783)
+#> 5   7.489052    small 239.67     high  POINT (-72.2602 42.36255)
+#> 6  86.533725    large 195.37     high POINT (-71.74634 41.87624)
+#> 7   1.926996    small 158.96     high POINT (-73.48408 41.34238)
+#> 8   6.514217    small  29.26      low POINT (-73.25487 41.20551)
+#> 9   3.100221    small 204.62     high POINT (-72.20897 42.12512)
+#> 10  1.868094    small  78.77      low POINT (-72.70233 42.18012)
+

The coords argument to sf::st_as_sf specifies the columns in NE_Lakes_df that are the x-coordinates and y-coordinates. The crs argument specifies the coordinate reference system, which we discuss in more detail next.
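The other construction route mentioned above, sf::st_sf(), pairs a data frame of attributes with a separately built geometry column. A minimal sketch with two made-up points (the coordinates and the AREA values are illustrative only, not from the package data):

pts <- sf::st_sfc(sf::st_point(c(-72.1, 42.6)), sf::st_point(c(-73.2, 42.4)), crs = 4326)
lakes_sf <- sf::st_sf(data.frame(AREA = c(10.6, 2.5)), geometry = pts)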

Coordinate reference systems

-

Spatial data and sf objects rely on coordinate reference systems. A coordinate reference system (CRS) provides a structure by which to identify unique locations on the Earth’s surface. CRSs are either geographic or projected. A geographic CRS uses longitude (east-west direction) and latitude (north-south direction) coordinates to represent location with respect to a specific ellipsoid or spheroid surface. Geographic CRSs are measured in degrees, not units like meters or feet – this has important consequences. For example, a one degree difference in latitude is different at different longitudes. Projected CRSs are measured in standard Cartesian coordinates with respect to a flat surface. They have x and y locations, an origin, and a unit of measurement (like meters or feet).

-

You can move between coordinate systems using sf::st_transform(). For example, we can transform NE_Lakes_geo (which uses a geographic CRS) to NE_Lakes (which uses a projected CRS) by running

+

Spatial data and sf objects rely on coordinate reference systems. A coordinate reference system (CRS) provides a structure by which to identify unique locations on the Earth’s surface. CRSs are either geographic or projected. A geographic CRS uses longitude (east-west direction) and latitude (north-south direction) coordinates to represent location with respect to a specific ellipsoid or spheroid surface. Geographic CRSs are measured in degrees, not units like meters or feet – this has important consequences. For example, the distance spanned by a one degree difference in longitude changes with latitude. Projected CRSs are measured in standard Cartesian coordinates with respect to a flat surface. They have x and y locations, an origin, and a unit of measurement (like meters or feet).

+

You can move between coordinate systems using sf::st_transform(). For example, we can transform NE_Lakes_geo (which uses a geographic CRS) to NE_Lakes (which uses a projected CRS) by running

-NE_Lakes <- st_transform(NE_Lakes_geo, crs = 5070)
-NE_Lakes
-#> Simple feature collection with 195 features and 4 fields
-#> Geometry type: POINT
-#> Dimension:     XY
-#> Bounding box:  xmin: 1834001 ymin: 2225021 xmax: 2127632 ymax: 2449985
-#> Projected CRS: NAD83 / Conus Albers
-#> First 10 features:
-#>         AREA AREA_CAT   ELEV ELEV_CAT                geometry
-#> 1  10.648825    large 264.69     high POINT (1930929 2417191)
-#> 2   2.504606    small 557.63     high POINT (1849399 2375085)
-#> 3   3.979199    small  28.79      low POINT (2017323 2393723)
-#> 4   1.645657    small 212.60     high POINT (1874135 2313865)
-#> 5   7.489052    small 239.67     high POINT (1922712 2392868)
-#> 6  86.533725    large 195.37     high POINT (1977163 2350744)
-#> 7   1.926996    small 158.96     high POINT (1852292 2257784)
-#> 8   6.514217    small  29.26      low POINT (1874421 2247388)
-#> 9   3.100221    small 204.62     high POINT (1933352 2368181)
-#> 10  1.868094    small  78.77      low POINT (1892582 2364213)
-

CRSs in R have traditionally been stored using EPSG codes or proj4string values. This meant that in order to transform your coordinates from one CRS to another, you needed two EPSG codes or proj4string values, one for each CRS. Recent updates to R’s handling of spatial data follow GDAL and PROJ (more information available here), and CRSs in sf objects are stored in R as lists with two components: input, which contains information regarding the EPSG code and proj4string; and wkt, an open geospatial standard format. For more information on CRSs and EPSG codes, see Pebesma (2018) and Lovelace et al. (2019). To search for various CRSs and EPSG codes, see here and here.

-

spsurvey will use the CRS from your sf object, so it is your responsibility to make sure the sf object has an appropriate CRS. If the CRS is not specified correctly, you may get misleading results.

+NE_Lakes <- st_transform(NE_Lakes_geo, crs = 5070)
+NE_Lakes
+#> Simple feature collection with 195 features and 4 fields
+#> Geometry type: POINT
+#> Dimension:     XY
+#> Bounding box:  xmin: 1834001 ymin: 2225021 xmax: 2127632 ymax: 2449985
+#> Projected CRS: NAD83 / Conus Albers
+#> First 10 features:
+#>         AREA AREA_CAT   ELEV ELEV_CAT                geometry
+#> 1  10.648825    large 264.69     high POINT (1930929 2417191)
+#> 2   2.504606    small 557.63     high POINT (1849399 2375085)
+#> 3   3.979199    small  28.79      low POINT (2017323 2393723)
+#> 4   1.645657    small 212.60     high POINT (1874135 2313865)
+#> 5   7.489052    small 239.67     high POINT (1922712 2392868)
+#> 6  86.533725    large 195.37     high POINT (1977163 2350744)
+#> 7   1.926996    small 158.96     high POINT (1852292 2257784)
+#> 8   6.514217    small  29.26      low POINT (1874421 2247388)
+#> 9   3.100221    small 204.62     high POINT (1933352 2368181)
+#> 10  1.868094    small  78.77      low POINT (1892582 2364213)
+

CRSs in R have traditionally been stored using EPSG codes or proj4string values. This meant that in order to transform your coordinates from one CRS to another, you needed two EPSG codes or proj4string values, one for each CRS. Recent updates to R’s handling of spatial data follow GDAL and PROJ (more information available here), and CRSs in sf objects are stored in R as lists with two components: input, which contains information regarding the EPSG code and proj4string; and wkt, an open geospatial standard format. For more information on CRSs and EPSG codes, see Pebesma (2018) and Lovelace et al. (2019). To search for various CRSs and EPSG codes, see here and here.

+

spsurvey will use the CRS from your sf object, so it is your responsibility to make sure the sf object has an appropriate CRS. If the CRS is not specified correctly, you may get misleading results.
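Both CRS components described above can be inspected through sf::st_crs(); a quick check (a sketch, not from the vignette) also confirms that the geographic and projected versions of the data carry different CRSs:

st_crs(NE_Lakes)$input                    # EPSG-style input, e.g. "EPSG:5070"
st_crs(NE_Lakes)$wkt                      # the wkt component
st_crs(NE_Lakes) == st_crs(NE_Lakes_geo)  # FALSE: projected versus geographic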

References

-

Lovelace, R., Nowosad, J., & Muenchow, J. (2019). Geocomputation with R. CRC Press.

-

Pebesma, E., (2018). Simple Features for R: Standardized Support for Spatial Vector Data. The R Journal, 10 (1):439-446. https://doi.org/10.32614/RJ-2018-009

-

Stevens Jr, D. L. and Olsen, A. R. (2004). Spatially balanced sampling of natural resources. Journal of the American Statistical Association, 99(465):262-278.

+

Lovelace, R., Nowosad, J., & Muenchow, J. (2019). Geocomputation with R. CRC Press.

+

Pebesma, E., (2018). Simple Features for R: Standardized Support for Spatial Vector Data. The R Journal, 10 (1):439-446. https://doi.org/10.32614/RJ-2018-009

+

Stevens Jr, D. L. and Olsen, A. R. (2004). Spatially balanced sampling of natural resources. Journal of the American Statistical Association, 99(465):262-278.

@@ -262,7 +358,7 @@

References

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/authors.html b/docs/authors.html index 6d59682..e23f158 100644 --- a/docs/authors.html +++ b/docs/authors.html @@ -17,7 +17,7 @@ spsurvey - 5.5.0 + 5.5.1 @@ -123,7 +123,7 @@

Citation

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/index.html b/docs/index.html index 39de1fa..6bab6f2 100644 --- a/docs/index.html +++ b/docs/index.html @@ -33,7 +33,7 @@ spsurvey - 5.5.0 + 5.5.1 @@ -97,28 +97,28 @@

Installation
-# install the most recent approved version from CRAN
-install.packages("spsurvey")
-# load the most recent approved version from CRAN
-library(spsurvey)
+# install the most recent approved version from CRAN
+install.packages("spsurvey")
+# load the most recent approved version from CRAN
+library(spsurvey)

You can install and load the most recent development version of spsurvey from GitHub by running:

-# Installing from GitHub requires you first install the remotes package
-install.packages("remotes")
-
-# install the most recent development version from GitHub
-remotes::install_github("USEPA/spsurvey", ref = "main")
-# load the most recent development version from GitHub
-library(spsurvey)
+# Installing from GitHub requires you first install the remotes package
+install.packages("remotes")
+
+# install the most recent development version from GitHub
+remotes::install_github("USEPA/spsurvey", ref = "main")
+# load the most recent development version from GitHub
+library(spsurvey)

You can install the most recent development version of spsurvey from GitHub with package vignettes by running:

# install the most recent development version from GitHub with package vignettes
 devtools::install_github("USEPA/spsurvey", build_vignettes=TRUE)

To view the vignettes in RStudio, run

-vignette("start-here", "spsurvey") # start with this vignette for an spsurvey overview
-vignette("EDA", "spsurvey") # for summaries and visualizations (exploratory data analysis)
-vignette("sampling", "spsurvey") # for spatially balanced sampling
-vignette("analysis", "spsurvey") # for analyzing data
+vignette("start-here", "spsurvey") # start with this vignette for an spsurvey overview +vignette("EDA", "spsurvey") # for summaries and visualizations (exploratory data analysis) +vignette("sampling", "spsurvey") # for spatially balanced sampling +vignette("analysis", "spsurvey") # for analyzing data

To view the vignettes in a web format, visit here.

Further detail regarding spsurvey is contained in the package’s documentation manual available for download here.

@@ -127,25 +127,25 @@

Citation

If you used spsurvey in your work, please cite it. You can view the most recent citation by running

-citation(package = "spsurvey")
-
#> To cite spsurvey in publications use:
-#> 
-#>   Michael Dumelle, Tom Kincaid, Anthony R. Olsen, Marc Weber (2023).
-#>   spsurvey: Spatial Sampling Design and Analysis in R. Journal of
-#>   Statistical Software, 105(3), 1-29. doi:10.18637/jss.v105.i03
-#> 
-#> A BibTeX entry for LaTeX users is
-#> 
-#>   @Article{,
-#>     title = {{spsurvey}: Spatial Sampling Design and Analysis in {R}},
-#>     author = {Michael Dumelle and Tom Kincaid and Anthony R. Olsen and Marc Weber},
-#>     journal = {Journal of Statistical Software},
-#>     year = {2023},
-#>     volume = {105},
-#>     number = {3},
-#>     pages = {1--29},
-#>     doi = {10.18637/jss.v105.i03},
-#>   }
+citation(package = "spsurvey")
+
#> To cite spsurvey in publications use:
+#> 
+#>   Michael Dumelle, Tom Kincaid, Anthony R. Olsen, Marc Weber (2023).
+#>   spsurvey: Spatial Sampling Design and Analysis in R. Journal of
+#>   Statistical Software, 105(3), 1-29. doi:10.18637/jss.v105.i03
+#> 
+#> A BibTeX entry for LaTeX users is
+#> 
+#>   @Article{,
+#>     title = {{spsurvey}: Spatial Sampling Design and Analysis in {R}},
+#>     author = {Michael Dumelle and Tom Kincaid and Anthony R. Olsen and Marc Weber},
+#>     journal = {Journal of Statistical Software},
+#>     year = {2023},
+#>     volume = {105},
+#>     number = {3},
+#>     pages = {1--29},
+#>     doi = {10.18637/jss.v105.i03},
+#>   }

Package Contributions
@@ -224,7 +224,7 @@

Dev status

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/news/index.html b/docs/news/index.html index 1cde687..991619b 100644 --- a/docs/news/index.html +++ b/docs/news/index.html @@ -17,7 +17,7 @@ spsurvey - 5.5.0 + 5.5.1
@@ -67,7 +67,16 @@

Changelog

- + +
+

Bug Fixes

+
  • Fixed a bug in revisit_dsgn() that prevented proper printing of panels when there were multiple panels.
  • +
  • Fixed a bug that prevented grts() and irs() from working properly with empty LINESTRING or POLYGON geometries.
  • +
  • Fixed a bug that prevented grts() and irs() from returning coordinates when the geometry column of sframe was not "geometry" and legacy_sites was specified (#40).
  • +
+
+
+
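For readers unfamiliar with the legacy_sites argument mentioned in the last fix, a minimal sketch of the call pattern (illustrative only, using the package's NE_Lakes and NE_Lakes_Legacy data sets; the sample size is arbitrary):

library(spsurvey)
set.seed(51)
eqprob_legacy <- grts(NE_Lakes, n_base = 50, legacy_sites = NE_Lakes_Legacy)
combined <- sp_rbind(eqprob_legacy)  # legacy and base sites bound into one sf object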

Minor Updates

  • @@ -78,34 +87,46 @@

    Minor Updates

Bug Fixes

-
  • Fixed a bug that caused an erorr in grts() and irs() occurred when at least one variable name in sframe was named "siteID", "siteuse", "replsite", "lon_WGS84", "lat_WGS84", "stratum", "wgt", "ip", "caty", "aux", xcoord, ycoord, or idpts and the name of the geometry column in sframe was not named "geometry" (#32).
+
  • Fixed a bug that caused an error in grts() and irs() when at least one variable name in sframe was named "siteID", "siteuse", "replsite", "lon_WGS84", "lat_WGS84", "stratum", "wgt", "ip", "caty", "aux", xcoord, ycoord, or idpts and the geometry column in sframe was not named "geometry" (#32).
  • +

Minor Updates

-
+

Minor Updates

-
  • Added an sp_frame() function to create sp_frame objects for use with plot() and summary() for sampling frames and analysis data.
    • -sp_frame objects have class sp_frame.
  • +
    • Added an sp_frame() function to create sp_frame objects for use with plot() and summary() for sampling frames and analysis data. +
      • +sp_frame objects have class sp_frame.
      • +
    • Added an sp_unframe() function to transform sp_frame objects back into their original object type.
    • -
    • Objects output from grts() and irs() are now called sp_design objects instead of spdesign objects.
      • -sp_design objects have class sp_design.
    • +
    • Objects output from grts() and irs() are now called sp_design objects instead of spdesign objects. +
      • +sp_design objects have class sp_design.
      • +
    • -summary() now works with sp_frame objects and sp_design objects.
      • -sp_summary() yields equivalent summaries and is currently maintained for backwards compatibility (i.e., sp_summary() has not changed).
    • +summary() now works with sp_frame objects and sp_design objects. +
      • +sp_summary() yields equivalent summaries and is currently maintained for backwards compatibility (i.e., sp_summary() has not changed).
      • +
    • summary functions now also summarize sf object geometries if the name of the geometry list-column is provided in the summary formula.
    • -plot() now works with sp_frame objects and sp_design objects.
      • -sp_plot() yields equivalent plots and is currently maintained for backwards compatibility (i.e., sp_plot() has not changed).
    • +plot() now works with sp_frame objects and sp_design objects. +
      • +sp_plot() yields equivalent plots and is currently maintained for backwards compatibility (i.e., sp_plot() has not changed).
      • +
    • -plot() now works with sp_CDF objects that are output from the CDF element of cont_analysis().
      • -cdf_plot() yields equivalent plots and is currently maintained for backwards compatibility (i.e., cdf_plot() has not changed).
    • +plot() now works with sp_CDF objects that are output from the CDF element of cont_analysis(). +
      • +cdf_plot() yields equivalent plots and is currently maintained for backwards compatibility (i.e., cdf_plot() has not changed).
      • +
    • Updated print functions for summaries obtained via summary() or sp_summary() so that they are clearer and easier to read.
    • Added a print function for sp_design objects. Now, a summary of site counts by site type (Legacy, Base, Over Near) crossed by strata or unequal probability levels or both is provided.
    • Added stratum_var, caty_var, and aux_var elements to the design list in sp_design objects.
    • @@ -191,7 +212,8 @@

Bug fixes

spsurvey 5.0.1 2021-10-20

      Bug fix

      -
      • Addressed Solaris performance problems.
      +
      • Addressed Solaris performance problems.
      • +
@@ -207,10 +229,12 @@

Major Updates

Minor Updates

-
  • Several functions have undergone minor changes to syntax and scope.
+
  • Several functions have undergone minor changes to syntax and scope.
  • +

Breaking Changes

-
  • Several functions have changed (both inputs and outputs) and functions in version 5.0.0 are not always backwards compatible with functions from previous versions. Though we recommend users upgrade existing code to work with the current version’s simpler implementations, backwards compatibility can be achieved by downloading previous versions of spsurvey.
+
  • Several functions have changed (both inputs and outputs) and functions in version 5.0.0 are not always backwards compatible with functions from previous versions. Though we recommend users upgrade existing code to work with the current version’s simpler implementations, backwards compatibility can be achieved by downloading previous versions of spsurvey.
  • +
@@ -233,13 +257,15 @@
+ +

Bug fix

-
  • Fix to localmean.weight function to correct when class for matrix objects was causing an indicator variable to have length two rather than length one, which caused an error in a while statement.
+
  • Fix to localmean.weight function to correct when class for matrix objects was causing an indicator variable to have length two rather than length one, which caused an error in a while statement.
  • +
@@ -250,10 +276,12 @@

Breaking changes

Bug fixes

-
  • The new R code function named numLevels that determines the number of hierarchical levels for a generalized random-tessellation stratified (GRTS) survey design now includes code to ensure that the maximum number of levels (which is currently 11) is not bypassed when creating a survey design.

+
  • The new R code function named numLevels that determines the number of hierarchical levels for a generalized random-tessellation stratified (GRTS) survey design now includes code to ensure that the maximum number of levels (which is currently 11) is not bypassed when creating a survey design.
  • +

Modified function

-
  • Modified function input.check to check for missing values among the x-coordinates and y-coordinates for location.
+
  • Modified function input.check to check for missing values among the x-coordinates and y-coordinates for location.
  • +
@@ -264,7 +292,8 @@

New features

Bug fixes

-
  • Modified C function readDbfFile to avoid PROTECT errors.
+
  • Modified C function readDbfFile to avoid PROTECT errors.
  • +
@@ -281,10 +310,12 @@

Bug fixes

spsurvey 3.3.0

New features

-
  • Inserted a SystemRequirements field in the DESCRIPTION file and modified functions grts, grtsarea, grtslin, grtspts, irs, irsarea, irslin, read.dbf, and read.shape to prevent the functions from being executed on big-endian processors.
+
  • Inserted a SystemRequirements field in the DESCRIPTION file and modified functions grts, grtsarea, grtslin, grtspts, irs, irsarea, irslin, read.dbf, and read.shape to prevent the functions from being executed on big-endian processors.
  • +

Bug fixes

-
  • Removed the C header file named order.h and replaced C functions readBigEndian and readLittleEndian with the version of those functions from spsurvey version 3.1.
+
  • Removed the C header file named order.h and replaced C functions readBigEndian and readLittleEndian with the version of those functions from spsurvey version 3.1.
  • +
@@ -310,10 +341,12 @@

Bug fixes

spsurvey 3.1.0

New features

-
  • Modified vignettes to use data sets from the data directory for the package.
+
  • Modified vignettes to use data sets from the data directory for the package.
  • +

Bug fixes

-
  • Modified C functions to ensure that variables passed to function malloc are of type unsigned integer.
+
  • Modified C functions to ensure that variables passed to function malloc are of type unsigned integer.
  • +
@@ -334,16 +367,19 @@

Bug fixes

spsurvey 2.7.0

New features

-
  • Added a cex.main argument to the cont.cdfplot and cdf.plot functions.
+
  • Added a cex.main argument to the cont.cdfplot and cdf.plot functions.
  • +

Bug fixes

-
  • Modified function warnprnt to use correct variable names in the output data frame so that partial matching warnings for the names are not generated.
+
  • Modified function warnprnt to use correct variable names in the output data frame so that partial matching warnings for the names are not generated.
  • +

New features

-
  • Created a data directory that contain rda versions of the data files used by vignettes.
+
  • Created a data directory that contain rda versions of the data files used by vignettes.
  • +

Bug fixes

  • Modified function change.analysis to allow analysis of surveys with no repeat visit sites.

  • @@ -355,7 +391,8 @@

Bug fixes

spsurvey 2.5.0

    New features

    -
    • Modified function grtspts and C function numLevels to calculate the sampling grid random shift values only once rather than each time the number of hierarchical levels is incremented.
    +
    • Modified function grtspts and C function numLevels to calculate the sampling grid random shift values only once rather than each time the number of hierarchical levels is incremented.
    • +

Bug fixes

  • Modified functions attrisk.analysis, cat.analysis, change.analysis, cont.analysis, and relrisk.analysis to replace NA values with FALSE in the indicator variables for subpopulations.

  • @@ -487,7 +524,8 @@

Bug fixes

spsurvey 1.5.0

    Bug fixes

    -
    • Modified C functions getRecordShapeSizes and lintFcn to accommodate Polyline shapefiles that have multiple parts.
    +
    • Modified C functions getRecordShapeSizes and lintFcn to accommodate Polyline shapefiles that have multiple parts.
    • +
@@ -519,16 +557,19 @@

New features

spsurvey 1.2.0

New features

-
  • Created a function named sp2shape and a C function named writeShapeFilePolygon that convert objects created by package sp to ESRI shapefiles. Also, renamed the C function writeShapeFile to writeShapeFilePoint.
+
  • Created a function named sp2shape and a C function named writeShapeFilePolygon that convert objects created by package sp to ESRI shapefiles. Also, renamed the C function writeShapeFile to writeShapeFilePoint.
  • +

Bug fixes

-
  • Modified function irsarea and created a C function named getRecordIDs to ensure that an IRS sample is selected when argument type.frame is set to “area” in function irs.
+
  • Modified function irsarea and created a C function named getRecordIDs to ensure that an IRS sample is selected when argument type.frame is set to “area” in function irs.
  • +

New features

-
  • Modified the C functions so that the package can accommodate M-type shapefiles.
+
  • Modified the C functions so that the package can accommodate M-type shapefiles.
  • +

Bug fixes

  • Modified functions grts and irs to ensure that the levels of mdcaty (the variable in the attributes data frame that specifies the unequal probability category for each element in the frame) are equivalent to the names in caty.n (the vector of sample sizes for each category in mdcaty, which is required for each element of the design list for which the selection type is “Unequal”).

  • @@ -540,7 +581,8 @@

Bug fixes

spsurvey 1.0.0

    New features

    -
    • This is the original version of the package.
    +
    • This is the original version of the package.
    • +
@@ -556,7 +598,7 @@

New features
-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/pkgdown.yml b/docs/pkgdown.yml
index a7c3b1c..a8007b4 100644
--- a/docs/pkgdown.yml
+++ b/docs/pkgdown.yml
@@ -1,10 +1,10 @@
-pandoc: 2.11.4
-pkgdown: 2.0.6
+pandoc: 2.19.2
+pkgdown: 2.0.7
 pkgdown_sha: ~
 articles:
   analysis: analysis.html
   EDA: EDA.html
   sampling: sampling.html
   start-here: start-here.html
-last_built: 2023-05-16T23:11Z
+last_built: 2024-01-09T17:57Z
diff --git a/docs/reference/Illinois_River.html b/docs/reference/Illinois_River.html
index eed8dd0..69d8327 100644
--- a/docs/reference/Illinois_River.html
+++ b/docs/reference/Illinois_River.html
@@ -18,7 +18,7 @@
 spsurvey
-5.5.0
+5.5.1
@@ -74,7 +74,7 @@

Illinois River data

-
Illinois_River
+
Illinois_River
@@ -101,7 +101,7 @@

Format

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/Illinois_River_Legacy.html b/docs/reference/Illinois_River_Legacy.html index e69238b..f576a1c 100644 --- a/docs/reference/Illinois_River_Legacy.html +++ b/docs/reference/Illinois_River_Legacy.html @@ -18,7 +18,7 @@ spsurvey - 5.5.0 + 5.5.1 @@ -74,7 +74,7 @@

Illinois River legacy data

-
Illinois_River_Legacy
+
Illinois_River_Legacy
@@ -101,7 +101,7 @@

Format

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/Lake_Ontario.html b/docs/reference/Lake_Ontario.html index e832f67..ab8b57e 100644 --- a/docs/reference/Lake_Ontario.html +++ b/docs/reference/Lake_Ontario.html @@ -18,7 +18,7 @@ spsurvey - 5.5.0 + 5.5.1 @@ -74,7 +74,7 @@

Lake Ontario data

-
Lake_Ontario
+
Lake_Ontario
@@ -110,7 +110,7 @@

Format

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/NE_Lakes.html b/docs/reference/NE_Lakes.html index e9d8978..cabc479 100644 --- a/docs/reference/NE_Lakes.html +++ b/docs/reference/NE_Lakes.html @@ -18,7 +18,7 @@ spsurvey - 5.5.0 + 5.5.1 @@ -74,7 +74,7 @@

New England Lakes data

-
NE_Lakes
+
NE_Lakes
@@ -110,7 +110,7 @@

Format

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/NE_Lakes_Legacy.html b/docs/reference/NE_Lakes_Legacy.html index bf3c72f..1315ca5 100644 --- a/docs/reference/NE_Lakes_Legacy.html +++ b/docs/reference/NE_Lakes_Legacy.html @@ -17,7 +17,7 @@ spsurvey - 5.5.0 + 5.5.1 @@ -72,7 +72,7 @@

New England Lakes legacy data

-
NE_Lakes_Legacy
+
NE_Lakes_Legacy
@@ -108,7 +108,7 @@

Format

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/NE_Lakes_df.html b/docs/reference/NE_Lakes_df.html index 6077d0a..d2102b0 100644 --- a/docs/reference/NE_Lakes_df.html +++ b/docs/reference/NE_Lakes_df.html @@ -18,7 +18,7 @@ spsurvey - 5.5.0 + 5.5.1 @@ -74,7 +74,7 @@

New England Lakes data (as a data frame)

-
NE_Lakes_df
+
NE_Lakes_df
@@ -114,7 +114,7 @@

Format

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/NLA_PNW.html b/docs/reference/NLA_PNW.html index 7c0bf46..e9fe902 100644 --- a/docs/reference/NLA_PNW.html +++ b/docs/reference/NLA_PNW.html @@ -19,7 +19,7 @@ spsurvey - 5.5.0 + 5.5.1 @@ -76,7 +76,7 @@

NLA PNW data

-
NLA_PNW
+
NLA_PNW
@@ -124,7 +124,7 @@

Format

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/NRSA_EPA7.html b/docs/reference/NRSA_EPA7.html index 7889935..af7b155 100644 --- a/docs/reference/NRSA_EPA7.html +++ b/docs/reference/NRSA_EPA7.html @@ -19,7 +19,7 @@ spsurvey - 5.5.0 + 5.5.1 @@ -76,7 +76,7 @@

NRSA EPA7 data

-
NRSA_EPA7
+
NRSA_EPA7
@@ -127,7 +127,7 @@

Format

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/adjwgt.html b/docs/reference/adjwgt.html index 9a648fa..7da562d 100644 --- a/docs/reference/adjwgt.html +++ b/docs/reference/adjwgt.html @@ -20,7 +20,7 @@ spsurvey - 5.5.0 + 5.5.1 @@ -78,7 +78,7 @@

Adjust survey design weights by categories

-
adjwgt(wgt, wgtcat = NULL, framesize, sites = NULL)
+
adjwgt(wgt, wgtcat = NULL, framesize, sites = NULL)
@@ -125,20 +125,20 @@

Author

Examples

-
wgt <- runif(50)
-wgtcat <- rep(c("A", "B"), c(30, 20))
-framesize <- c(A = 15, B = 10)
-sites <- rep(rep(c(TRUE, FALSE), c(9, 1)), 5)
-adjwgt(wgt, wgtcat, framesize, sites)
-#>  [1] 0.043236942 0.303467182 1.203465906 0.005329816 0.026742781 0.929493721
-#>  [7] 0.550054450 1.106700002 0.856271032 0.000000000 0.725198992 0.147868667
-#> [13] 0.839568174 0.353124934 0.415406438 0.163085336 0.226615377 0.135662011
-#> [19] 1.041519884 0.000000000 0.675017306 0.233142048 0.366534191 0.306492615
-#> [25] 0.987999204 0.310199240 1.107077620 0.744478986 1.196247145 0.000000000
-#> [31] 0.588051104 0.879216137 1.241887296 0.584546511 0.760906513 0.052847179
-#> [37] 0.245573235 0.282495513 0.168751034 0.000000000 0.483873311 0.922223200
-#> [43] 0.647267182 0.215163713 0.816041558 0.820951051 0.103030270 0.418363640
-#> [49] 0.768811552 0.000000000
+    
wgt <- runif(50)
+wgtcat <- rep(c("A", "B"), c(30, 20))
+framesize <- c(A = 15, B = 10)
+sites <- rep(rep(c(TRUE, FALSE), c(9, 1)), 5)
+adjwgt(wgt, wgtcat, framesize, sites)
+#>  [1] 0.006620736 0.073309846 0.864082196 0.199606423 0.556244620 1.140772402
+#>  [7] 1.051511478 0.556287607 0.873322282 0.000000000 1.135491447 0.240088460
+#> [13] 0.301826224 1.170262970 0.485577078 0.823762985 0.025623222 0.239542816
+#> [19] 0.027051464 0.000000000 0.971423261 1.143696090 0.220204810 0.645162024
+#> [25] 0.632857303 0.071818203 0.099662452 0.484398260 0.959793342 0.000000000
+#> [31] 1.057573347 0.709004596 1.039502335 1.030638690 0.114893617 1.184803869
+#> [37] 0.343595618 0.483001105 0.488339185 0.000000000 0.455772868 1.066973356
+#> [43] 0.071710181 1.195244333 0.034006027 0.018601238 0.119242042 0.009342650
+#> [49] 0.577754943 0.000000000
 
@@ -153,7 +153,7 @@

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/adjwgtNR.html b/docs/reference/adjwgtNR.html index 4d9facb..3039e27 100644 --- a/docs/reference/adjwgtNR.html +++ b/docs/reference/adjwgtNR.html @@ -21,7 +21,7 @@ spsurvey - 5.5.0 + 5.5.1 @@ -80,7 +80,7 @@

Adjust survey design weights for non-response by categories

-
adjwgtNR(wgt, MARClass, EvalStatus, TNRClass, TRClass)
+
adjwgtNR(wgt, MARClass, EvalStatus, TNRClass, TRClass)
@@ -133,20 +133,20 @@

Author

Examples

-
set.seed(5)
-wgt <- runif(40)
-MARClass <- rep(c("A", "B"), rep(20, 2))
-EvalStatus <- sample(c("Not_Target", "Target_Sampled", "Target_Not_Sampled"), 40, replace = TRUE)
-TNRClass <- "Target_Not_Sampled"
-TRClass <- "Target_Sampled"
-adjwgtNR(wgt, MARClass, EvalStatus, TNRClass, TRClass)
+    
set.seed(5)
+wgt <- runif(40)
+MARClass <- rep(c("A", "B"), rep(20, 2))
+EvalStatus <- sample(c("Not_Target", "Target_Sampled", "Target_Not_Sampled"), 40, replace = TRUE)
+TNRClass <- "Target_Not_Sampled"
+TRClass <- "Target_Sampled"
+adjwgtNR(wgt, MARClass, EvalStatus, TNRClass, TRClass)
 #>  [1] 0.0000000 1.0531091 1.4091418 0.4370921 0.0000000 1.0774517 0.8114191
 #>  [8] 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.4893536 0.0000000
 #> [15] 0.0000000 0.0000000 0.0000000 1.3645626 0.0000000 1.2943413 1.3897962
 #> [22] 0.0000000 0.3299456 0.0000000 0.0000000 0.0000000 0.6828898 0.0000000
 #> [29] 0.0000000 1.4909182 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000
 #> [36] 0.0000000 0.0000000 0.9333714 0.6208610 0.0000000
-# function that has an error check
+# function that has an error check
 
@@ -161,7 +161,7 @@

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/ash1_wgt.html b/docs/reference/ash1_wgt.html index 31f3cee..7f22841 100644 --- a/docs/reference/ash1_wgt.html +++ b/docs/reference/ash1_wgt.html @@ -18,7 +18,7 @@ spsurvey - 5.5.0 + 5.5.1 @@ -74,14 +74,14 @@

Compute the average shifted histogram (ASH) for one-dimensional weighted data
-
ash1_wgt(
-  x,
-  wgt = rep(1, length(x)),
-  m = 5,
-  nbin = 50,
-  ab = NULL,
-  support = "Continuous"
-)
+
ash1_wgt(
+  x,
+  wgt = rep(1, length(x)),
+  m = 5,
+  nbin = 50,
+  ab = NULL,
+  support = "Continuous"
+)
@@ -144,10 +144,10 @@

Author

Examples

-
x <- rnorm(100, 10, sqrt(10))
-wgt <- runif(100, 10, 100)
-rslt <- ash1_wgt(x, wgt)
-plot(rslt)
+    
x <- rnorm(100, 10, sqrt(10))
+wgt <- runif(100, 10, 100)
+rslt <- ash1_wgt(x, wgt)
+plot(rslt)
 
 
@@ -163,7 +163,7 @@

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/attrisk_analysis.html b/docs/reference/attrisk_analysis.html index 5640df3..d800b31 100644 --- a/docs/reference/attrisk_analysis.html +++ b/docs/reference/attrisk_analysis.html @@ -23,7 +23,7 @@ spsurvey - 5.5.0 + 5.5.1 @@ -84,31 +84,31 @@

Attributable risk analysis

-
attrisk_analysis(
-  dframe,
-  vars_response,
-  vars_stressor,
-  response_levels = NULL,
-  stressor_levels = NULL,
-  subpops = NULL,
-  siteID = NULL,
-  weight = "weight",
-  xcoord = NULL,
-  ycoord = NULL,
-  stratumID = NULL,
-  clusterID = NULL,
-  weight1 = NULL,
-  xcoord1 = NULL,
-  ycoord1 = NULL,
-  sizeweight = FALSE,
-  sweight = NULL,
-  sweight1 = NULL,
-  fpc = NULL,
-  popsize = NULL,
-  vartype = "Local",
-  conf = 95,
-  All_Sites = FALSE
-)
+
attrisk_analysis(
+  dframe,
+  vars_response,
+  vars_stressor,
+  response_levels = NULL,
+  stressor_levels = NULL,
+  subpops = NULL,
+  siteID = NULL,
+  weight = "weight",
+  xcoord = NULL,
+  ycoord = NULL,
+  stratumID = NULL,
+  clusterID = NULL,
+  weight1 = NULL,
+  xcoord1 = NULL,
+  ycoord1 = NULL,
+  sizeweight = FALSE,
+  sweight = NULL,
+  sweight1 = NULL,
+  fpc = NULL,
+  popsize = NULL,
+  vartype = "Local",
+  conf = 95,
+  All_Sites = FALSE
+)
@@ -504,27 +504,27 @@

Author

Examples

-
dframe <- data.frame(
-  siteID = paste0("Site", 1:100),
-  wgt = runif(100, 10, 100),
-  xcoord = runif(100),
-  ycoord = runif(100),
-  stratum = rep(c("Stratum1", "Stratum2"), 50),
-  RespVar1 = sample(c("Poor", "Good"), 100, replace = TRUE),
-  RespVar2 = sample(c("Poor", "Good"), 100, replace = TRUE),
-  StressVar = sample(c("Poor", "Good"), 100, replace = TRUE),
-  All_Sites = rep("All Sites", 100),
-  Resource_Class = rep(c("Agr", "Forest"), c(55, 45))
-)
-myresponse <- c("RespVar1", "RespVar2")
-mystressor <- c("StressVar")
-mysubpops <- c("All_Sites", "Resource_Class")
-attrisk_analysis(dframe,
-  vars_response = myresponse,
-  vars_stressor = mystressor, subpops = mysubpops, siteID = "siteID",
-  weight = "wgt", xcoord = "xcoord", ycoord = "ycoord",
-  stratumID = "stratum"
-)
+    
dframe <- data.frame(
+  siteID = paste0("Site", 1:100),
+  wgt = runif(100, 10, 100),
+  xcoord = runif(100),
+  ycoord = runif(100),
+  stratum = rep(c("Stratum1", "Stratum2"), 50),
+  RespVar1 = sample(c("Poor", "Good"), 100, replace = TRUE),
+  RespVar2 = sample(c("Poor", "Good"), 100, replace = TRUE),
+  StressVar = sample(c("Poor", "Good"), 100, replace = TRUE),
+  All_Sites = rep("All Sites", 100),
+  Resource_Class = rep(c("Agr", "Forest"), c(55, 45))
+)
+myresponse <- c("RespVar1", "RespVar2")
+mystressor <- c("StressVar")
+mysubpops <- c("All_Sites", "Resource_Class")
+attrisk_analysis(dframe,
+  vars_response = myresponse,
+  vars_stressor = mystressor, subpops = mysubpops, siteID = "siteID",
+  weight = "wgt", xcoord = "xcoord", ycoord = "ycoord",
+  stratumID = "stratum"
+)
 #>             Type Subpopulation Response  Stressor nResp     Estimate
 #> 1      All_Sites     All Sites RespVar1 StressVar   100 -0.146822490
 #> 2      All_Sites     All Sites RespVar2 StressVar   100 -0.029705172
@@ -574,7 +574,7 @@ 

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/cat_analysis.html b/docs/reference/cat_analysis.html index 0ce0144..b7ea43f 100644 --- a/docs/reference/cat_analysis.html +++ b/docs/reference/cat_analysis.html @@ -22,7 +22,7 @@ spsurvey - 5.5.0 + 5.5.1
@@ -82,29 +82,29 @@

Categorical variable analysis

-
cat_analysis(
-  dframe,
-  vars,
-  subpops = NULL,
-  siteID = NULL,
-  weight = "weight",
-  xcoord = NULL,
-  ycoord = NULL,
-  stratumID = NULL,
-  clusterID = NULL,
-  weight1 = NULL,
-  xcoord1 = NULL,
-  ycoord1 = NULL,
-  sizeweight = FALSE,
-  sweight = NULL,
-  sweight1 = NULL,
-  fpc = NULL,
-  popsize = NULL,
-  vartype = "Local",
-  jointprob = "overton",
-  conf = 95,
-  All_Sites = FALSE
-)
+
cat_analysis(
+  dframe,
+  vars,
+  subpops = NULL,
+  siteID = NULL,
+  weight = "weight",
+  xcoord = NULL,
+  ycoord = NULL,
+  stratumID = NULL,
+  clusterID = NULL,
+  weight1 = NULL,
+  xcoord1 = NULL,
+  ycoord1 = NULL,
+  sizeweight = FALSE,
+  sweight = NULL,
+  sweight1 = NULL,
+  fpc = NULL,
+  popsize = NULL,
+  vartype = "Local",
+  jointprob = "overton",
+  conf = 95,
+  All_Sites = FALSE
+)
@@ -423,27 +423,27 @@

Author

Examples

-
dframe <- data.frame(
-  siteID = paste0("Site", 1:100),
-  wgt = runif(100, 10, 100),
-  xcoord = runif(100),
-  ycoord = runif(100),
-  stratum = rep(c("Stratum1", "Stratum2"), 50),
-  CatVar = rep(c("north", "south", "east", "west"), 25),
-  All_Sites = rep("All Sites", 100),
-  Resource_Class = rep(c("Good", "Poor"), c(55, 45))
-)
-myvars <- c("CatVar")
-mysubpops <- c("All_Sites", "Resource_Class")
-mypopsize <- data.frame(
-  Resource_Class = c("Good", "Poor"),
-  Total = c(4000, 1500)
-)
-cat_analysis(dframe,
-  vars = myvars, subpops = mysubpops, siteID = "siteID",
-  weight = "wgt", xcoord = "xcoord", ycoord = "ycoord",
-  stratumID = "stratum", popsize = mypopsize
-)
+    
dframe <- data.frame(
+  siteID = paste0("Site", 1:100),
+  wgt = runif(100, 10, 100),
+  xcoord = runif(100),
+  ycoord = runif(100),
+  stratum = rep(c("Stratum1", "Stratum2"), 50),
+  CatVar = rep(c("north", "south", "east", "west"), 25),
+  All_Sites = rep("All Sites", 100),
+  Resource_Class = rep(c("Good", "Poor"), c(55, 45))
+)
+myvars <- c("CatVar")
+mysubpops <- c("All_Sites", "Resource_Class")
+mypopsize <- data.frame(
+  Resource_Class = c("Good", "Poor"),
+  Total = c(4000, 1500)
+)
+cat_analysis(dframe,
+  vars = myvars, subpops = mysubpops, siteID = "siteID",
+  weight = "wgt", xcoord = "xcoord", ycoord = "ycoord",
+  stratumID = "stratum", popsize = mypopsize
+)
 #>              Type Subpopulation Indicator Category nResp Estimate.P StdError.P
 #> 1       All_Sites     All Sites    CatVar     east    25   24.56407   3.655213
 #> 2       All_Sites     All Sites    CatVar    north    25   26.94039   3.655213
@@ -506,7 +506,7 @@ 

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/cdf_plot.html b/docs/reference/cdf_plot.html index 9c18cee..19694f4 100644 --- a/docs/reference/cdf_plot.html +++ b/docs/reference/cdf_plot.html @@ -19,7 +19,7 @@ spsurvey - 5.5.0 + 5.5.1
@@ -76,25 +76,25 @@

Plot a cumulative distribution function (CDF)

-
cdf_plot(
-  cdfest,
-  var = NULL,
-  subpop = NULL,
-  subpop_level = NULL,
-  units_cdf = "Percent",
-  type_cdf = "Continuous",
-  log = "",
-  xlab = NULL,
-  ylab = NULL,
-  ylab_r = NULL,
-  main = NULL,
-  legloc = NULL,
-  confcut = 0,
-  conflev = 95,
-  cex.main = 1.2,
-  cex.legend = 1,
-  ...
-)
+
cdf_plot(
+  cdfest,
+  var = NULL,
+  subpop = NULL,
+  subpop_level = NULL,
+  units_cdf = "Percent",
+  type_cdf = "Continuous",
+  log = "",
+  xlab = NULL,
+  ylab = NULL,
+  ylab_r = NULL,
+  main = NULL,
+  legloc = NULL,
+  confcut = 0,
+  conflev = 95,
+  cex.main = 1.2,
+  cex.legend = 1,
+  ...
+)
@@ -236,42 +236,42 @@

Author

Examples

-
if (FALSE) {
-dframe <- data.frame(
-  siteID = paste0("Site", 1:100),
-  wgt = runif(100, 10, 100),
-  xcoord = runif(100),
-  ycoord = runif(100),
-  stratum = rep(c("Stratum1", "Stratum2"), 50),
-  ContVar = rnorm(100, 10, 1),
-  All_Sites = rep("All Sites", 100),
-  Resource_Class = rep(c("Good", "Poor"), c(55, 45))
-)
-myvars <- c("ContVar")
-mysubpops <- c("All_Sites", "Resource_Class")
-mypopsize <- data.frame(
-  Resource_Class = c("Good", "Poor"),
-  Total = c(4000, 1500)
-)
-myanalysis <- cont_analysis(dframe,
-  vars = myvars, subpops = mysubpops,
-  siteID = "siteID", weight = "wgt", xcoord = "xcoord", ycoord = "ycoord",
-  stratumID = "stratum", popsize = mypopsize
-)
-keep <- with(myanalysis$CDF, Type == "Resource_Class" &
-  Subpopulation == "Good")
-par(mfrow = c(2, 1))
-cdf_plot(myanalysis$CDF[keep, ],
-  xlab = "ContVar",
-  ylab = "Percent of Stream Length", ylab_r = "Stream Length (km)",
-  main = "Estimates for Resource Class: Good"
-)
-cdf_plot(myanalysis$CDF[keep, ],
-  xlab = "ContVar",
-  ylab = "Percent of Stream Length", ylab_r = "Same",
-  main = "Estimates for Resource Class: Good"
-)
-}
+    
if (FALSE) {
+dframe <- data.frame(
+  siteID = paste0("Site", 1:100),
+  wgt = runif(100, 10, 100),
+  xcoord = runif(100),
+  ycoord = runif(100),
+  stratum = rep(c("Stratum1", "Stratum2"), 50),
+  ContVar = rnorm(100, 10, 1),
+  All_Sites = rep("All Sites", 100),
+  Resource_Class = rep(c("Good", "Poor"), c(55, 45))
+)
+myvars <- c("ContVar")
+mysubpops <- c("All_Sites", "Resource_Class")
+mypopsize <- data.frame(
+  Resource_Class = c("Good", "Poor"),
+  Total = c(4000, 1500)
+)
+myanalysis <- cont_analysis(dframe,
+  vars = myvars, subpops = mysubpops,
+  siteID = "siteID", weight = "wgt", xcoord = "xcoord", ycoord = "ycoord",
+  stratumID = "stratum", popsize = mypopsize
+)
+keep <- with(myanalysis$CDF, Type == "Resource_Class" &
+  Subpopulation == "Good")
+par(mfrow = c(2, 1))
+cdf_plot(myanalysis$CDF[keep, ],
+  xlab = "ContVar",
+  ylab = "Percent of Stream Length", ylab_r = "Stream Length (km)",
+  main = "Estimates for Resource Class: Good"
+)
+cdf_plot(myanalysis$CDF[keep, ],
+  xlab = "ContVar",
+  ylab = "Percent of Stream Length", ylab_r = "Same",
+  main = "Estimates for Resource Class: Good"
+)
+}
 
@@ -286,7 +286,7 @@

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/change_analysis.html b/docs/reference/change_analysis.html index 685bedd..b675ff8 100644 --- a/docs/reference/change_analysis.html +++ b/docs/reference/change_analysis.html @@ -23,7 +23,7 @@ spsurvey - 5.5.0 + 5.5.1 @@ -84,34 +84,34 @@

Change analysis

-
change_analysis(
-  dframe,
-  vars_cat = NULL,
-  vars_cont = NULL,
-  test = "mean",
-  subpops = NULL,
-  surveyID = "surveyID",
-  survey_names = NULL,
-  siteID = "siteID",
-  weight = "weight",
-  revisitwgt = FALSE,
-  xcoord = NULL,
-  ycoord = NULL,
-  stratumID = NULL,
-  clusterID = NULL,
-  weight1 = NULL,
-  xcoord1 = NULL,
-  ycoord1 = NULL,
-  sizeweight = FALSE,
-  sweight = NULL,
-  sweight1 = NULL,
-  fpc = NULL,
-  popsize = NULL,
-  vartype = "Local",
-  jointprob = "overton",
-  conf = 95,
-  All_Sites = FALSE
-)
+
change_analysis(
+  dframe,
+  vars_cat = NULL,
+  vars_cont = NULL,
+  test = "mean",
+  subpops = NULL,
+  surveyID = "surveyID",
+  survey_names = NULL,
+  siteID = "siteID",
+  weight = "weight",
+  revisitwgt = FALSE,
+  xcoord = NULL,
+  ycoord = NULL,
+  stratumID = NULL,
+  clusterID = NULL,
+  weight1 = NULL,
+  xcoord1 = NULL,
+  ycoord1 = NULL,
+  sizeweight = FALSE,
+  sweight = NULL,
+  sweight1 = NULL,
+  fpc = NULL,
+  popsize = NULL,
+  vartype = "Local",
+  jointprob = "overton",
+  conf = 95,
+  All_Sites = FALSE
+)
@@ -816,25 +816,25 @@

Author

Examples

-
# Categorical variable example for three resource classes
-dframe <- data.frame(
-  surveyID = rep(c("Survey 1", "Survey 2"), c(100, 100)),
-  siteID = paste0("Site", 1:200),
-  wgt = runif(200, 10, 100),
-  xcoord = runif(200),
-  ycoord = runif(200),
-  stratum = rep(rep(c("Stratum 1", "Stratum 2"), c(2, 2)), 50),
-  CatVar = rep(c("North", "South"), 100),
-  All_Sites = rep("All Sites", 200),
-  Resource_Class = sample(c("Good", "Fair", "Poor"), 200, replace = TRUE)
-)
-myvars <- c("CatVar")
-mysubpops <- c("All_Sites", "Resource_Class")
-change_analysis(dframe,
-  vars_cat = myvars, subpops = mysubpops,
-  surveyID = "surveyID", siteID = "siteID", weight = "wgt",
-  xcoord = "xcoord", ycoord = "ycoord", stratumID = "stratum"
-)
+    
# Categorical variable example for three resource classes
+dframe <- data.frame(
+  surveyID = rep(c("Survey 1", "Survey 2"), c(100, 100)),
+  siteID = paste0("Site", 1:200),
+  wgt = runif(200, 10, 100),
+  xcoord = runif(200),
+  ycoord = runif(200),
+  stratum = rep(rep(c("Stratum 1", "Stratum 2"), c(2, 2)), 50),
+  CatVar = rep(c("North", "South"), 100),
+  All_Sites = rep("All Sites", 200),
+  Resource_Class = sample(c("Good", "Fair", "Poor"), 200, replace = TRUE)
+)
+myvars <- c("CatVar")
+mysubpops <- c("All_Sites", "Resource_Class")
+change_analysis(dframe,
+  vars_cat = myvars, subpops = mysubpops,
+  surveyID = "surveyID", siteID = "siteID", weight = "wgt",
+  xcoord = "xcoord", ycoord = "ycoord", stratumID = "stratum"
+)
 #> $catsum
 #>   Survey_1 Survey_2           Type Subpopulation Indicator Category DiffEst.P
 #> 1 Survey 1 Survey 2      All_Sites     All Sites    CatVar    North -1.474844
@@ -923,7 +923,7 @@ 

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/cont_analysis.html b/docs/reference/cont_analysis.html index 0e0240d..e02472e 100644 --- a/docs/reference/cont_analysis.html +++ b/docs/reference/cont_analysis.html @@ -23,7 +23,7 @@ spsurvey - 5.5.0 + 5.5.1
@@ -84,31 +84,31 @@

Continuous variable analysis

-
cont_analysis(
-  dframe,
-  vars,
-  subpops = NULL,
-  siteID = NULL,
-  weight = "weight",
-  xcoord = NULL,
-  ycoord = NULL,
-  stratumID = NULL,
-  clusterID = NULL,
-  weight1 = NULL,
-  xcoord1 = NULL,
-  ycoord1 = NULL,
-  sizeweight = FALSE,
-  sweight = NULL,
-  sweight1 = NULL,
-  fpc = NULL,
-  popsize = NULL,
-  vartype = "Local",
-  jointprob = "overton",
-  conf = 95,
-  pctval = c(5, 10, 25, 50, 75, 90, 95),
-  statistics = c("CDF", "Pct", "Mean", "Total"),
-  All_Sites = FALSE
-)
+
cont_analysis(
+  dframe,
+  vars,
+  subpops = NULL,
+  siteID = NULL,
+  weight = "weight",
+  xcoord = NULL,
+  ycoord = NULL,
+  stratumID = NULL,
+  clusterID = NULL,
+  weight1 = NULL,
+  xcoord1 = NULL,
+  ycoord1 = NULL,
+  sizeweight = FALSE,
+  sweight = NULL,
+  sweight1 = NULL,
+  fpc = NULL,
+  popsize = NULL,
+  vartype = "Local",
+  jointprob = "overton",
+  conf = 95,
+  pctval = c(5, 10, 25, 50, 75, 90, 95),
+  statistics = c("CDF", "Pct", "Mean", "Total"),
+  All_Sites = FALSE
+)
@@ -548,27 +548,27 @@

Author

Examples

-
dframe <- data.frame(
-  siteID = paste0("Site", 1:100),
-  wgt = runif(100, 10, 100),
-  xcoord = runif(100),
-  ycoord = runif(100),
-  stratum = rep(c("Stratum1", "Stratum2"), 50),
-  ContVar = rnorm(100, 10, 1),
-  All_Sites = rep("All Sites", 100),
-  Resource_Class = rep(c("Good", "Poor"), c(55, 45))
-)
-myvars <- c("ContVar")
-mysubpops <- c("All_Sites", "Resource_Class")
-mypopsize <- data.frame(
-  Resource_Class = c("Good", "Poor"),
-  Total = c(4000, 1500)
-)
-cont_analysis(dframe,
-  vars = myvars, subpops = mysubpops, siteID = "siteID",
-  weight = "wgt", xcoord = "xcoord", ycoord = "ycoord",
-  stratumID = "stratum", popsize = mypopsize, statistics = "Mean"
-)
+    
dframe <- data.frame(
+  siteID = paste0("Site", 1:100),
+  wgt = runif(100, 10, 100),
+  xcoord = runif(100),
+  ycoord = runif(100),
+  stratum = rep(c("Stratum1", "Stratum2"), 50),
+  ContVar = rnorm(100, 10, 1),
+  All_Sites = rep("All Sites", 100),
+  Resource_Class = rep(c("Good", "Poor"), c(55, 45))
+)
+myvars <- c("ContVar")
+mysubpops <- c("All_Sites", "Resource_Class")
+mypopsize <- data.frame(
+  Resource_Class = c("Good", "Poor"),
+  Total = c(4000, 1500)
+)
+cont_analysis(dframe,
+  vars = myvars, subpops = mysubpops, siteID = "siteID",
+  weight = "wgt", xcoord = "xcoord", ycoord = "ycoord",
+  stratumID = "stratum", popsize = mypopsize, statistics = "Mean"
+)
 #> $CDF
 #> NULL
 #> 
@@ -602,7 +602,7 @@ 

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/cont_cdfplot.html b/docs/reference/cont_cdfplot.html
index 5e479ef..0758b45 100644
--- a/docs/reference/cont_cdfplot.html
+++ b/docs/reference/cont_cdfplot.html
@@ -21,7 +21,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -80,24 +80,24 @@

Create a PDF file containing cumulative distribution functions (CDF) plots
-
cont_cdfplot(
-  pdffile = "cdf2x2.pdf",
-  cdfest,
-  units_cdf = "Percent",
-  ind_type = rep("Continuous", nind),
-  log = rep("", nind),
-  xlab = NULL,
-  ylab = NULL,
-  ylab_r = NULL,
-  legloc = NULL,
-  cdf_page = 4,
-  width = 10,
-  height = 8,
-  confcut = 0,
-  cex.main = 1.2,
-  cex.legend = 1,
-  ...
-)
+
cont_cdfplot(
+  pdffile = "cdf2x2.pdf",
+  cdfest,
+  units_cdf = "Percent",
+  ind_type = rep("Continuous", nind),
+  log = rep("", nind),
+  xlab = NULL,
+  ylab = NULL,
+  ylab_r = NULL,
+  legloc = NULL,
+  cdf_page = 4,
+  width = 10,
+  height = 8,
+  confcut = 0,
+  cex.main = 1.2,
+  cex.legend = 1,
+  ...
+)
@@ -225,31 +225,31 @@

Author

Examples

-
if (FALSE) {
-dframe <- data.frame(
-  siteID = paste0("Site", 1:100),
-  wgt = runif(100, 10, 100),
-  xcoord = runif(100),
-  ycoord = runif(100),
-  stratum = rep(c("Stratum1", "Stratum2"), 50),
-  ContVar = rnorm(100, 10, 1),
-  All_Sites = rep("All Sites", 100),
-  Resource_Class = rep(c("Good", "Poor"), c(55, 45))
-)
-myvars <- c("ContVar")
-mysubpops <- c("All_Sites", "Resource_Class")
-mypopsize <- data.frame(
-  Resource_Class = c("Good", "Poor"),
-  Total = c(4000, 1500)
-)
-myanalysis <- cont_analysis(dframe,
-  vars = myvars, subpops = mysubpops,
-  siteID = "siteID", weight = "wgt", xcoord = "xcoord", ycoord = "ycoord",
-  stratumID = "stratum", popsize = mypopsize
-)
-cont_cdfplot("myanalysis.pdf", myanalysis$CDF, ylab_r = "Stream Length (km)")
-}
-
+    
if (FALSE) {
+dframe <- data.frame(
+  siteID = paste0("Site", 1:100),
+  wgt = runif(100, 10, 100),
+  xcoord = runif(100),
+  ycoord = runif(100),
+  stratum = rep(c("Stratum1", "Stratum2"), 50),
+  ContVar = rnorm(100, 10, 1),
+  All_Sites = rep("All Sites", 100),
+  Resource_Class = rep(c("Good", "Poor"), c(55, 45))
+)
+myvars <- c("ContVar")
+mysubpops <- c("All_Sites", "Resource_Class")
+mypopsize <- data.frame(
+  Resource_Class = c("Good", "Poor"),
+  Total = c(4000, 1500)
+)
+myanalysis <- cont_analysis(dframe,
+  vars = myvars, subpops = mysubpops,
+  siteID = "siteID", weight = "wgt", xcoord = "xcoord", ycoord = "ycoord",
+  stratumID = "stratum", popsize = mypopsize
+)
+cont_cdfplot("myanalysis.pdf", myanalysis$CDF, ylab_r = "Stream Length (km)")
+}
+
 
@@ -264,7 +264,7 @@

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/cont_cdftest.html b/docs/reference/cont_cdftest.html
index b1fbfba..97bfb55 100644
--- a/docs/reference/cont_cdftest.html
+++ b/docs/reference/cont_cdftest.html
@@ -34,7 +34,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -106,30 +106,30 @@

Cumulative distribution function (CDF) inference for a probability survey
-
cont_cdftest(
-  dframe,
-  vars,
-  subpops = NULL,
-  surveyID = NULL,
-  siteID = "siteID",
-  weight = "weight",
-  xcoord = NULL,
-  ycoord = NULL,
-  stratumID = NULL,
-  clusterID = NULL,
-  weight1 = NULL,
-  xcoord1 = NULL,
-  ycoord1 = NULL,
-  sizeweight = FALSE,
-  sweight = NULL,
-  sweight1 = NULL,
-  fpc = NULL,
-  popsize = NULL,
-  vartype = "Local",
-  jointprob = "overton",
-  testname = "adjWald",
-  nclass = 3
-)
+
cont_cdftest(
+  dframe,
+  vars,
+  subpops = NULL,
+  surveyID = NULL,
+  siteID = "siteID",
+  weight = "weight",
+  xcoord = NULL,
+  ycoord = NULL,
+  stratumID = NULL,
+  clusterID = NULL,
+  weight1 = NULL,
+  xcoord1 = NULL,
+  ycoord1 = NULL,
+  sizeweight = FALSE,
+  sweight = NULL,
+  sweight1 = NULL,
+  fpc = NULL,
+  popsize = NULL,
+  vartype = "Local",
+  jointprob = "overton",
+  testname = "adjWald",
+  nclass = 3
+)
@@ -402,36 +402,36 @@

Author

Examples

-
n <- 200
-mysiteID <- paste("Site", 1:n, sep = "")
-dframe <- data.frame(
-  siteID = mysiteID,
-  wgt = runif(n, 10, 100),
-  xcoord = runif(n),
-  ycoord = runif(n),
-  stratum = rep(c("Stratum1", "Stratum2"), n / 2),
-  Resource_Class = sample(c("Agr", "Forest", "Urban"), n, replace = TRUE)
-)
-ContVar <- numeric(n)
-tst <- dframe$Resource_Class == "Agr"
-ContVar[tst] <- rnorm(sum(tst), 10, 1)
-tst <- dframe$Resource_Class == "Forest"
-ContVar[tst] <- rnorm(sum(tst), 10.1, 1)
-tst <- dframe$Resource_Class == "Urban"
-ContVar[tst] <- rnorm(sum(tst), 10.5, 1)
-dframe$ContVar <- ContVar
-myvars <- c("ContVar")
-mysubpops <- c("Resource_Class")
-mypopsize <- data.frame(
-  Resource_Class = rep(c("Agr", "Forest", "Urban"), rep(2, 3)),
-  stratum = rep(c("Stratum1", "Stratum2"), 3),
-  Total = c(2500, 1500, 1000, 500, 600, 450)
-)
-cont_cdftest(dframe,
-  vars = myvars, subpops = mysubpops, siteID = "siteID",
-  weight = "wgt", xcoord = "xcoord", ycoord = "ycoord",
-  stratumID = "stratum", popsize = mypopsize, testname = "RaoScott_First"
-)
+    
n <- 200
+mysiteID <- paste("Site", 1:n, sep = "")
+dframe <- data.frame(
+  siteID = mysiteID,
+  wgt = runif(n, 10, 100),
+  xcoord = runif(n),
+  ycoord = runif(n),
+  stratum = rep(c("Stratum1", "Stratum2"), n / 2),
+  Resource_Class = sample(c("Agr", "Forest", "Urban"), n, replace = TRUE)
+)
+ContVar <- numeric(n)
+tst <- dframe$Resource_Class == "Agr"
+ContVar[tst] <- rnorm(sum(tst), 10, 1)
+tst <- dframe$Resource_Class == "Forest"
+ContVar[tst] <- rnorm(sum(tst), 10.1, 1)
+tst <- dframe$Resource_Class == "Urban"
+ContVar[tst] <- rnorm(sum(tst), 10.5, 1)
+dframe$ContVar <- ContVar
+myvars <- c("ContVar")
+mysubpops <- c("Resource_Class")
+mypopsize <- data.frame(
+  Resource_Class = rep(c("Agr", "Forest", "Urban"), rep(2, 3)),
+  stratum = rep(c("Stratum1", "Stratum2"), 3),
+  Total = c(2500, 1500, 1000, 500, 600, 450)
+)
+cont_cdftest(dframe,
+  vars = myvars, subpops = mysubpops, siteID = "siteID",
+  weight = "wgt", xcoord = "xcoord", ycoord = "ycoord",
+  stratumID = "stratum", popsize = mypopsize, testname = "RaoScott_First"
+)
 #>             Type Subpopulation_1 Subpopulation_2 Indicator
 #> 1 Resource_Class             Agr          Forest   ContVar
 #> 2 Resource_Class             Agr           Urban   ContVar
@@ -454,7 +454,7 @@ 

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/cov_panel_dsgn.html b/docs/reference/cov_panel_dsgn.html
index a608f6c..4a4a37a 100644
--- a/docs/reference/cov_panel_dsgn.html
+++ b/docs/reference/cov_panel_dsgn.html
@@ -21,7 +21,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -80,16 +80,16 @@

Create a covariance matrix for a panel design

-
cov_panel_dsgn(
-  paneldsgn = matrix(50, 1, 10),
-  nrepeats = 1,
-  unit_var = NULL,
-  period_var = NULL,
-  unitperiod_var = NULL,
-  index_var = NULL,
-  unit_rho = 1,
-  period_rho = 0
-)
+
cov_panel_dsgn(
+  paneldsgn = matrix(50, 1, 10),
+  nrepeats = 1,
+  unit_var = NULL,
+  period_var = NULL,
+  unitperiod_var = NULL,
+  index_var = NULL,
+  unit_rho = 1,
+  period_rho = 0
+)
@@ -202,7 +202,7 @@

Author

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/diffrisk_analysis.html b/docs/reference/diffrisk_analysis.html
index 403dafe..f490ef5 100644
--- a/docs/reference/diffrisk_analysis.html
+++ b/docs/reference/diffrisk_analysis.html
@@ -23,7 +23,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -84,31 +84,31 @@

Risk difference analysis

-
diffrisk_analysis(
-  dframe,
-  vars_response,
-  vars_stressor,
-  response_levels = NULL,
-  stressor_levels = NULL,
-  subpops = NULL,
-  siteID = NULL,
-  weight = "weight",
-  xcoord = NULL,
-  ycoord = NULL,
-  stratumID = NULL,
-  clusterID = NULL,
-  weight1 = NULL,
-  xcoord1 = NULL,
-  ycoord1 = NULL,
-  sizeweight = FALSE,
-  sweight = NULL,
-  sweight1 = NULL,
-  fpc = NULL,
-  popsize = NULL,
-  vartype = "Local",
-  conf = 95,
-  All_Sites = FALSE
-)
+
diffrisk_analysis(
+  dframe,
+  vars_response,
+  vars_stressor,
+  response_levels = NULL,
+  stressor_levels = NULL,
+  subpops = NULL,
+  siteID = NULL,
+  weight = "weight",
+  xcoord = NULL,
+  ycoord = NULL,
+  stratumID = NULL,
+  clusterID = NULL,
+  weight1 = NULL,
+  xcoord1 = NULL,
+  ycoord1 = NULL,
+  sizeweight = FALSE,
+  sweight = NULL,
+  sweight1 = NULL,
+  fpc = NULL,
+  popsize = NULL,
+  vartype = "Local",
+  conf = 95,
+  All_Sites = FALSE
+)
@@ -504,27 +504,27 @@

Author

Examples

-
dframe <- data.frame(
-  siteID = paste0("Site", 1:100),
-  wgt = runif(100, 10, 100),
-  xcoord = runif(100),
-  ycoord = runif(100),
-  stratum = rep(c("Stratum1", "Stratum2"), 50),
-  RespVar1 = sample(c("Poor", "Good"), 100, replace = TRUE),
-  RespVar2 = sample(c("Poor", "Good"), 100, replace = TRUE),
-  StressVar = sample(c("Poor", "Good"), 100, replace = TRUE),
-  All_Sites = rep("All Sites", 100),
-  Resource_Class = rep(c("Agr", "Forest"), c(55, 45))
-)
-myresponse <- c("RespVar1", "RespVar2")
-mystressor <- c("StressVar")
-mysubpops <- c("All_Sites", "Resource_Class")
-diffrisk_analysis(dframe,
-  vars_response = myresponse,
-  vars_stressor = mystressor, subpops = mysubpops, siteID = "siteID",
-  weight = "wgt", xcoord = "xcoord", ycoord = "ycoord",
-  stratumID = "stratum"
-)
+    
dframe <- data.frame(
+  siteID = paste0("Site", 1:100),
+  wgt = runif(100, 10, 100),
+  xcoord = runif(100),
+  ycoord = runif(100),
+  stratum = rep(c("Stratum1", "Stratum2"), 50),
+  RespVar1 = sample(c("Poor", "Good"), 100, replace = TRUE),
+  RespVar2 = sample(c("Poor", "Good"), 100, replace = TRUE),
+  StressVar = sample(c("Poor", "Good"), 100, replace = TRUE),
+  All_Sites = rep("All Sites", 100),
+  Resource_Class = rep(c("Agr", "Forest"), c(55, 45))
+)
+myresponse <- c("RespVar1", "RespVar2")
+mystressor <- c("StressVar")
+mysubpops <- c("All_Sites", "Resource_Class")
+diffrisk_analysis(dframe,
+  vars_response = myresponse,
+  vars_stressor = mystressor, subpops = mysubpops, siteID = "siteID",
+  weight = "wgt", xcoord = "xcoord", ycoord = "ycoord",
+  stratumID = "stratum"
+)
 #>             Type Subpopulation Response  Stressor nResp     Estimate
 #> 1      All_Sites     All Sites RespVar1 StressVar   100 -0.037973184
 #> 2      All_Sites     All Sites RespVar2 StressVar   100  0.080765266
@@ -574,7 +574,7 @@ 

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/errorprnt.html b/docs/reference/errorprnt.html
index bfd74c9..227f1ee 100644
--- a/docs/reference/errorprnt.html
+++ b/docs/reference/errorprnt.html
@@ -17,7 +17,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -72,7 +72,7 @@

Print errors from analysis functions

-
errorprnt(error_vec = get("error_vec", envir = .GlobalEnv))
+
errorprnt(error_vec = get("error_vec", envir = .GlobalEnv))
@@ -106,7 +106,7 @@

Author

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/grts.html b/docs/reference/grts.html
index 8c78413..cb0f17f 100644
--- a/docs/reference/grts.html
+++ b/docs/reference/grts.html
@@ -25,7 +25,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -88,30 +88,30 @@

Select a generalized random tessellation stratified (GRTS) sample

-
grts(
-  sframe,
-  n_base,
-  stratum_var = NULL,
-  seltype = NULL,
-  caty_var = NULL,
-  caty_n = NULL,
-  aux_var = NULL,
-  legacy_var = NULL,
-  legacy_sites = NULL,
-  legacy_stratum_var = NULL,
-  legacy_caty_var = NULL,
-  legacy_aux_var = NULL,
-  mindis = NULL,
-  maxtry = 10,
-  n_over = NULL,
-  n_near = NULL,
-  wgt_units = NULL,
-  pt_density = NULL,
-  DesignID = "Site",
-  SiteBegin = 1,
-  sep = "-",
-  projcrs_check = TRUE
-)
+
grts(
+  sframe,
+  n_base,
+  stratum_var = NULL,
+  seltype = NULL,
+  caty_var = NULL,
+  caty_n = NULL,
+  aux_var = NULL,
+  legacy_var = NULL,
+  legacy_sites = NULL,
+  legacy_stratum_var = NULL,
+  legacy_caty_var = NULL,
+  legacy_aux_var = NULL,
+  mindis = NULL,
+  maxtry = 10,
+  n_over = NULL,
+  n_near = NULL,
+  wgt_units = NULL,
+  pt_density = NULL,
+  DesignID = "Site",
+  SiteBegin = 1,
+  sep = "-",
+  projcrs_check = TRUE
+)
@@ -209,7 +209,7 @@

Arguments

 geometry representing the legacy sites. spsurvey assumes that the legacy
 sites were selected from a previous sampling design that incorporated
 randomness into site selection and that the legacy sites
-are elements of the current sampling frame. If sframe has a
+are elements of the current sampling frame. If sframe has a
 POINT or MULTIPOINT geometry, the observations in legacy_sites should not
 also be in sframe (i.e., duplicates are not removed). Thus, sframe and
 legacy_sites together compose the current sampling frame. If m or z values
@@ -481,15 +481,15 @@

Author

Examples

-
if (FALSE) {
-samp <- grts(NE_Lakes, n_base = 100)
-print(samp)
-strata_n <- c(low = 25, high = 30)
-samp_strat <- grts(NE_Lakes, n_base = strata_n, stratum_var = "ELEV_CAT")
-print(samp_strat)
-samp_over <- grts(NE_Lakes, n_base = 30, n_over = 5)
-print(samp_over)
-}
+    
if (FALSE) {
+samp <- grts(NE_Lakes, n_base = 100)
+print(samp)
+strata_n <- c(low = 25, high = 30)
+samp_strat <- grts(NE_Lakes, n_base = strata_n, stratum_var = "ELEV_CAT")
+print(samp_strat)
+samp_over <- grts(NE_Lakes, n_base = 30, n_over = 5)
+print(samp_over)
+}
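A hedged sketch, not part of this patch, of the legacy_sites usage described under Arguments above (legacy_pts is a hypothetical sf POINT object of sites kept from an earlier survey; only the call pattern is illustrated):

if (FALSE) {
# legacy_pts: hypothetical sf POINT object of previously selected sites
# that are carried forward into the new GRTS sample
samp_legacy <- grts(NE_Lakes, n_base = 50, legacy_sites = legacy_pts)
print(samp_legacy)
}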
 
@@ -504,7 +504,7 @@

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/index.html b/docs/reference/index.html
index d3dc8c9..7d62c89 100644
--- a/docs/reference/index.html
+++ b/docs/reference/index.html
@@ -17,7 +17,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -262,7 +262,7 @@

All functions
-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/irs.html b/docs/reference/irs.html
index 0952113..2a09689 100644
--- a/docs/reference/irs.html
+++ b/docs/reference/irs.html
@@ -24,7 +24,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -86,30 +86,30 @@

Select an independent random sample (IRS)

-
irs(
-  sframe,
-  n_base,
-  stratum_var = NULL,
-  seltype = NULL,
-  caty_var = NULL,
-  caty_n = NULL,
-  aux_var = NULL,
-  legacy_var = NULL,
-  legacy_sites = NULL,
-  legacy_stratum_var = NULL,
-  legacy_caty_var = NULL,
-  legacy_aux_var = NULL,
-  mindis = NULL,
-  maxtry = 10,
-  n_over = NULL,
-  n_near = NULL,
-  wgt_units = NULL,
-  pt_density = NULL,
-  DesignID = "Site",
-  SiteBegin = 1,
-  sep = "-",
-  projcrs_check = TRUE
-)
+
irs(
+  sframe,
+  n_base,
+  stratum_var = NULL,
+  seltype = NULL,
+  caty_var = NULL,
+  caty_n = NULL,
+  aux_var = NULL,
+  legacy_var = NULL,
+  legacy_sites = NULL,
+  legacy_stratum_var = NULL,
+  legacy_caty_var = NULL,
+  legacy_aux_var = NULL,
+  mindis = NULL,
+  maxtry = 10,
+  n_over = NULL,
+  n_near = NULL,
+  wgt_units = NULL,
+  pt_density = NULL,
+  DesignID = "Site",
+  SiteBegin = 1,
+  sep = "-",
+  projcrs_check = TRUE
+)
@@ -207,7 +207,7 @@

Arguments

 geometry representing the legacy sites. spsurvey assumes that the legacy
 sites were selected from a previous sampling design that incorporated
 randomness into site selection and that the legacy sites
-are elements of the current sampling frame. If sframe has a
+are elements of the current sampling frame. If sframe has a
 POINT or MULTIPOINT geometry, the observations in legacy_sites should not
 also be in sframe (i.e., duplicates are not removed). Thus, sframe and
 legacy_sites together compose the current sampling frame. If m or z values
@@ -474,15 +474,15 @@

Author

Examples

-
if (FALSE) {
-samp <- irs(NE_Lakes, n_base = 100)
-print(samp)
-strata_n <- c(low = 25, high = 30)
-samp_strat <- irs(NE_Lakes, n_base = strata_n, stratum_var = "ELEV_CAT")
-print(samp_strat)
-samp_over <- irs(NE_Lakes, n_base = 30, n_over = 5)
-print(samp_over)
-}
+    
if (FALSE) {
+samp <- irs(NE_Lakes, n_base = 100)
+print(samp)
+strata_n <- c(low = 25, high = 30)
+samp_strat <- irs(NE_Lakes, n_base = strata_n, stratum_var = "ELEV_CAT")
+print(samp_strat)
+samp_over <- irs(NE_Lakes, n_base = 30, n_over = 5)
+print(samp_over)
+}
 
@@ -497,7 +497,7 @@

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/localmean_cov.html b/docs/reference/localmean_cov.html
index d2c2862..2a1a46b 100644
--- a/docs/reference/localmean_cov.html
+++ b/docs/reference/localmean_cov.html
@@ -18,7 +18,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -74,7 +74,7 @@

Internal Function: Variance-Covariance Matrix Based on Local Mean Estimator<
-
localmean_cov(zmat, weight_1st)
+
localmean_cov(zmat, weight_1st)
@@ -113,7 +113,7 @@

Author

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/localmean_var.html b/docs/reference/localmean_var.html
index 339f6a0..b77af0a 100644
--- a/docs/reference/localmean_var.html
+++ b/docs/reference/localmean_var.html
@@ -17,7 +17,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -72,7 +72,7 @@

Internal Function: Local Mean Variance Estimator

-
localmean_var(z, weight_1st)
+
localmean_var(z, weight_1st)
@@ -111,7 +111,7 @@

Author

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/localmean_weight.html b/docs/reference/localmean_weight.html
index bebc451..3220de9 100644
--- a/docs/reference/localmean_weight.html
+++ b/docs/reference/localmean_weight.html
@@ -18,7 +18,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -74,7 +74,7 @@

Internal Function: Local Mean Variance Neighbors and Weights

-
localmean_weight(x, y, prb, nbh = 4)
+
localmean_weight(x, y, prb, nbh = 4)
@@ -123,7 +123,7 @@

Author

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/pd_summary.html b/docs/reference/pd_summary.html
index fa9db6b..6654434 100644
--- a/docs/reference/pd_summary.html
+++ b/docs/reference/pd_summary.html
@@ -21,7 +21,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -80,7 +80,7 @@

Summary characteristics of a panel revisit design

-
pd_summary(object, visitdsgn = NULL, ...)
+
pd_summary(object, visitdsgn = NULL, ...)
@@ -151,12 +151,12 @@

Author

Examples

-
# Serially alternating panel revisit design summary
-sa_dsgn <- revisit_dsgn(20, panels = list(SA60N = list(
-  n = 60, pnl_dsgn = c(1, 4),
-  pnl_n = NA, start_option = "None"
-)), begin = 1)
-pd_summary(sa_dsgn)
+    
# Serially alternating panel revisit design summary
+sa_dsgn <- revisit_dsgn(20, panels = list(SA60N = list(
+  n = 60, pnl_dsgn = c(1, 4),
+  pnl_n = NA, start_option = "None"
+)), begin = 1)
+pd_summary(sa_dsgn)
 #> $n_panel
 #> [1] 5
 #> 
@@ -182,11 +182,11 @@ 

Examples

#>  1   2   3   4   5   6   7   8   9  10  11  12  13  14  15  16  17  18  19  20
#> 60 120 180 240 300 300 300 300 300 300 300 300 300 300 300 300 300 300 300 300
#> 
-# Add visit design where first panel is sampled twice at every time period
-sa_visit <- sa_dsgn
-sa_visit[sa_visit > 0] <- 1
-sa_visit[1, sa_visit[1, ] > 0] <- 2
-pd_summary(sa_dsgn, sa_visit)
+# Add visit design where first panel is sampled twice at every time period
+sa_visit <- sa_dsgn
+sa_visit[sa_visit > 0] <- 1
+sa_visit[1, sa_visit[1, ] > 0] <- 2
+pd_summary(sa_dsgn, sa_visit)
#> $n_panel
#> [1] 5
#> 
@@ -226,7 +226,7 @@

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/plot.html b/docs/reference/plot.html
index e4a8568..3be21f3 100644
--- a/docs/reference/plot.html
+++ b/docs/reference/plot.html
@@ -25,7 +25,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -81,41 +81,41 @@

Plot sampling frames, design sites, and analysis data.

are of the distributions of the right-hand side variables. If the left-hand
side of the formula contains a variable, plots are of the left-hand side
variable for each level of each right-hand side variable.
-This function is largely built on plot.sf(), and all spsurvey plotting
-methods can supply additional arguments to plot.sf(). For more information on
+This function is largely built on plot.sf(), and all spsurvey plotting
+methods can supply additional arguments to plot.sf(). For more information on
plotting in sf, run ?sf::plot.sf(). Equivalent to sp_plot(); both are
currently maintained for backwards compatibility.
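A hedged illustration, not taken from this patch, of how extra arguments flow through to sf::plot.sf(); key.pos and pch are ordinary plot.sf()/graphics settings chosen here only as an example:

if (FALSE) {
# arguments beyond the formula are forwarded to sf::plot.sf()
plot(sp_frame(NE_Lakes), formula = ~ELEV_CAT, pch = 19, key.pos = 4)
}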

-
# S3 method for sp_frame
-plot(
-  x,
-  formula = ~1,
-  xcoord,
-  ycoord,
-  crs,
-  var_args = NULL,
-  varlevel_args = NULL,
-  geom = FALSE,
-  onlyshow = NULL,
-  fix_bbox = TRUE,
-  ...
-)
-
-# S3 method for sp_design
-plot(
-  x,
-  sframe = NULL,
-  formula = ~siteuse,
-  siteuse = NULL,
-  var_args = NULL,
-  varlevel_args = NULL,
-  geom = FALSE,
-  onlyshow = NULL,
-  fix_bbox = TRUE,
-  ...
-)
+
# S3 method for sp_frame
+plot(
+  x,
+  formula = ~1,
+  xcoord,
+  ycoord,
+  crs,
+  var_args = NULL,
+  varlevel_args = NULL,
+  geom = FALSE,
+  onlyshow = NULL,
+  fix_bbox = TRUE,
+  ...
+)
+
+# S3 method for sp_design
+plot(
+  x,
+  sframe = NULL,
+  formula = ~siteuse,
+  siteuse = NULL,
+  var_args = NULL,
+  varlevel_args = NULL,
+  geom = FALSE,
+  onlyshow = NULL,
+  fix_bbox = TRUE,
+  ...
+)
@@ -198,7 +198,7 @@

Arguments

...
-

Additional arguments to pass to plot.sf().

+

Additional arguments to pass to plot.sf().

sframe
@@ -225,13 +225,13 @@

Author

Examples

-
if (FALSE) {
-data("NE_Lakes")
-NE_Lakes <- sp_frame(NE_Lakes)
-plot(NE_Lakes, formula = ~ELEV_CAT)
-sample <- grts(NE_Lakes, 30)
-plot(sample, NE_Lakes)
-}
+    
if (FALSE) {
+data("NE_Lakes")
+NE_Lakes <- sp_frame(NE_Lakes)
+plot(NE_Lakes, formula = ~ELEV_CAT)
+sample <- grts(NE_Lakes, 30)
+plot(sample, NE_Lakes)
+}
 
@@ -246,7 +246,7 @@

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/plot.sp_CDF.html b/docs/reference/plot.sp_CDF.html
index 224d42c..3b6bde2 100644
--- a/docs/reference/plot.sp_CDF.html
+++ b/docs/reference/plot.sp_CDF.html
@@ -20,7 +20,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -78,26 +78,26 @@

Plot a cumulative distribution function (CDF)

-
# S3 method for sp_CDF
-plot(
-  x,
-  var = NULL,
-  subpop = NULL,
-  subpop_level = NULL,
-  units_cdf = "Percent",
-  type_cdf = "Continuous",
-  log = "",
-  xlab = NULL,
-  ylab = NULL,
-  ylab_r = NULL,
-  main = NULL,
-  legloc = NULL,
-  confcut = 0,
-  conflev = 95,
-  cex.main = 1.2,
-  cex.legend = 1,
-  ...
-)
+
# S3 method for sp_CDF
+plot(
+  x,
+  var = NULL,
+  subpop = NULL,
+  subpop_level = NULL,
+  units_cdf = "Percent",
+  type_cdf = "Continuous",
+  log = "",
+  xlab = NULL,
+  ylab = NULL,
+  ylab_r = NULL,
+  main = NULL,
+  legloc = NULL,
+  confcut = 0,
+  conflev = 95,
+  cex.main = 1.2,
+  cex.legend = 1,
+  ...
+)
@@ -239,42 +239,42 @@

Author

Examples

-
if (FALSE) {
-dframe <- data.frame(
-  siteID = paste0("Site", 1:100),
-  wgt = runif(100, 10, 100),
-  xcoord = runif(100),
-  ycoord = runif(100),
-  stratum = rep(c("Stratum1", "Stratum2"), 50),
-  ContVar = rnorm(100, 10, 1),
-  All_Sites = rep("All Sites", 100),
-  Resource_Class = rep(c("Good", "Poor"), c(55, 45))
-)
-myvars <- c("ContVar")
-mysubpops <- c("All_Sites", "Resource_Class")
-mypopsize <- data.frame(
-  Resource_Class = c("Good", "Poor"),
-  Total = c(4000, 1500)
-)
-myanalysis <- cont_analysis(dframe,
-  vars = myvars, subpops = mysubpops,
-  siteID = "siteID", weight = "wgt", xcoord = "xcoord", ycoord = "ycoord",
-  stratumID = "stratum", popsize = mypopsize
-)
-keep <- with(myanalysis$CDF, Type == "Resource_Class" &
-  Subpopulation == "Good")
-par(mfrow = c(2, 1))
-plot(myanalysis$CDF[keep, ],
-  xlab = "ContVar",
-  ylab = "Percent of Stream Length", ylab_r = "Stream Length (km)",
-  main = "Estimates for Resource Class: Good"
-)
-plot(myanalysis$CDF[keep, ],
-  xlab = "ContVar",
-  ylab = "Percent of Stream Length", ylab_r = "Same",
-  main = "Estimates for Resource Class: Good"
-)
-}
+    
if (FALSE) {
+dframe <- data.frame(
+  siteID = paste0("Site", 1:100),
+  wgt = runif(100, 10, 100),
+  xcoord = runif(100),
+  ycoord = runif(100),
+  stratum = rep(c("Stratum1", "Stratum2"), 50),
+  ContVar = rnorm(100, 10, 1),
+  All_Sites = rep("All Sites", 100),
+  Resource_Class = rep(c("Good", "Poor"), c(55, 45))
+)
+myvars <- c("ContVar")
+mysubpops <- c("All_Sites", "Resource_Class")
+mypopsize <- data.frame(
+  Resource_Class = c("Good", "Poor"),
+  Total = c(4000, 1500)
+)
+myanalysis <- cont_analysis(dframe,
+  vars = myvars, subpops = mysubpops,
+  siteID = "siteID", weight = "wgt", xcoord = "xcoord", ycoord = "ycoord",
+  stratumID = "stratum", popsize = mypopsize
+)
+keep <- with(myanalysis$CDF, Type == "Resource_Class" &
+  Subpopulation == "Good")
+par(mfrow = c(2, 1))
+plot(myanalysis$CDF[keep, ],
+  xlab = "ContVar",
+  ylab = "Percent of Stream Length", ylab_r = "Stream Length (km)",
+  main = "Estimates for Resource Class: Good"
+)
+plot(myanalysis$CDF[keep, ],
+  xlab = "ContVar",
+  ylab = "Percent of Stream Length", ylab_r = "Same",
+  main = "Estimates for Resource Class: Good"
+)
+}
 
@@ -289,7 +289,7 @@

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/power_dsgn.html b/docs/reference/power_dsgn.html
index 77df7ab..9f35d7c 100644
--- a/docs/reference/power_dsgn.html
+++ b/docs/reference/power_dsgn.html
@@ -21,7 +21,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -80,23 +80,23 @@

Power calculation for multiple panel designs

-
power_dsgn(
-  ind_names,
-  ind_values,
-  unit_var,
-  period_var,
-  unitperiod_var,
-  index_var,
-  unit_rho = 1,
-  period_rho = 0,
-  paneldsgn,
-  nrepeats = NULL,
-  trend_type = "mean",
-  ind_pct = NULL,
-  ind_tail = NULL,
-  trend = 2,
-  alpha = 0.05
-)
+
power_dsgn(
+  ind_names,
+  ind_values,
+  unit_var,
+  period_var,
+  unitperiod_var,
+  index_var,
+  unit_rho = 1,
+  period_rho = 0,
+  paneldsgn,
+  nrepeats = NULL,
+  trend_type = "mean",
+  ind_pct = NULL,
+  ind_tail = NULL,
+  trend = 2,
+  alpha = 0.05
+)
@@ -226,9 +226,12 @@

References

See also

-  • ppd_plot to plot power curves for
-    panel designs
+  • ppd_plot to plot power curves for
+    panel designs

Author

@@ -237,18 +240,18 @@

Author

Examples

-
# Power for rotating panel with sample size 60
-power_dsgn("Variable_Name",
-  ind_values = 43, unit_var = 280, period_var = 4,
-  unitperiod_var = 40, index_var = 90, unit_rho = 1, period_rho = 0,
-  paneldsgn = list(NoR60 = revisit_dsgn(20,
-    panels = list(NoR60 = list(
-      n = 60, pnl_dsgn = c(1, NA),
-      pnl_n = NA, start_option = "None"
-    )), begin = 1
-  )),
-  nrepeats = NULL, trend_type = "mean", trend = 1.0, alpha = 0.05
-)
+    
# Power for rotating panel with sample size 60
+power_dsgn("Variable_Name",
+  ind_values = 43, unit_var = 280, period_var = 4,
+  unitperiod_var = 40, index_var = 90, unit_rho = 1, period_rho = 0,
+  paneldsgn = list(NoR60 = revisit_dsgn(20,
+    panels = list(NoR60 = list(
+      n = 60, pnl_dsgn = c(1, NA),
+      pnl_n = NA, start_option = "None"
+    )), begin = 1
+  )),
+  nrepeats = NULL, trend_type = "mean", trend = 1.0, alpha = 0.05
+)
 #> $design
 #> [1] "NoR60"
 #> 
@@ -311,7 +314,7 @@ 

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/ppd_plot.html b/docs/reference/ppd_plot.html
index 464e403..4f84ec8 100644
--- a/docs/reference/ppd_plot.html
+++ b/docs/reference/ppd_plot.html
@@ -23,7 +23,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -84,19 +84,19 @@

Plot power curves for panel designs

-
ppd_plot(
-  object,
-  plot_type = "standard",
-  trend_type = "mean",
-  xaxis_type = "period",
-  comp_type = "design",
-  dsgns = NULL,
-  indicator = NULL,
-  trend = NULL,
-  period = NULL,
-  alpha = NULL,
-  ...
-)
+
ppd_plot(
+  object,
+  plot_type = "standard",
+  trend_type = "mean",
+  xaxis_type = "period",
+  comp_type = "design",
+  dsgns = NULL,
+  indicator = NULL,
+  trend = NULL,
+  period = NULL,
+  alpha = NULL,
+  ...
+)
@@ -224,37 +224,37 @@

Author

Examples

-
if (FALSE) {
-# Construct a rotating panel design with sample size of 60
-R60N <- revisit_dsgn(20, panels = list(R60N = list(
-  n = 60, pnl_dsgn = c(1, NA),
-  pnl_n = NA, start_option = "None"
-)), begin = 1)
-
-# Construct a fixed panel design with sample size of 60
-F60 <- revisit_dsgn(20, panels = list(F60 = list(
-  n = 60, pnl_dsgn = c(1, 0),
-  pnl_n = NA, start_option = "None"
-)), begin = 1)
-
-# Power for rotating panel with sample size 60
-Power_tst <- power_dsgn("Variable_Name",
-  ind_values = 43, unit_var = 280,
-  period_var = 4, unitperiod_var = 40, index_var = 90,
-  unit_rho = 1, period_rho = 0, paneldsgn = list(
-    R60N = R60N, F60 = F60
-  ), nrepeats = NULL,
-  trend_type = "mean", trend = c(1.0, 2.0), alpha = 0.05
-)
-ppd_plot(Power_tst)
-ppd_plot(Power_tst, dsgns = c("F60", "R60N"))
-ppd_plot(Power_tst, dsgns = c("F60", "R60N"), trend = 1.0)
-ppd_plot(Power_tst,
-  plot_type = "relative", comp_type = "design",
-  trend_type = "mean", trend = c(1, 2), dsgns = c("R60N", "F60"),
-  indicator = "Variable_Name"
-)
-}
+    
if (FALSE) {
+# Construct a rotating panel design with sample size of 60
+R60N <- revisit_dsgn(20, panels = list(R60N = list(
+  n = 60, pnl_dsgn = c(1, NA),
+  pnl_n = NA, start_option = "None"
+)), begin = 1)
+
+# Construct a fixed panel design with sample size of 60
+F60 <- revisit_dsgn(20, panels = list(F60 = list(
+  n = 60, pnl_dsgn = c(1, 0),
+  pnl_n = NA, start_option = "None"
+)), begin = 1)
+
+# Power for rotating panel with sample size 60
+Power_tst <- power_dsgn("Variable_Name",
+  ind_values = 43, unit_var = 280,
+  period_var = 4, unitperiod_var = 40, index_var = 90,
+  unit_rho = 1, period_rho = 0, paneldsgn = list(
+    R60N = R60N, F60 = F60
+  ), nrepeats = NULL,
+  trend_type = "mean", trend = c(1.0, 2.0), alpha = 0.05
+)
+ppd_plot(Power_tst)
+ppd_plot(Power_tst, dsgns = c("F60", "R60N"))
+ppd_plot(Power_tst, dsgns = c("F60", "R60N"), trend = 1.0)
+ppd_plot(Power_tst,
+  plot_type = "relative", comp_type = "design",
+  trend_type = "mean", trend = c(1, 2), dsgns = c("R60N", "F60"),
+  indicator = "Variable_Name"
+)
+}
 
@@ -269,7 +269,7 @@

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/relrisk_analysis.html b/docs/reference/relrisk_analysis.html
index 3584a52..24e99bd 100644
--- a/docs/reference/relrisk_analysis.html
+++ b/docs/reference/relrisk_analysis.html
@@ -23,7 +23,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -84,31 +84,31 @@

Relative risk analysis

-
relrisk_analysis(
-  dframe,
-  vars_response,
-  vars_stressor,
-  response_levels = NULL,
-  stressor_levels = NULL,
-  subpops = NULL,
-  siteID = NULL,
-  weight = "weight",
-  xcoord = NULL,
-  ycoord = NULL,
-  stratumID = NULL,
-  clusterID = NULL,
-  weight1 = NULL,
-  xcoord1 = NULL,
-  ycoord1 = NULL,
-  sizeweight = FALSE,
-  sweight = NULL,
-  sweight1 = NULL,
-  fpc = NULL,
-  popsize = NULL,
-  vartype = "Local",
-  conf = 95,
-  All_Sites = FALSE
-)
+
relrisk_analysis(
+  dframe,
+  vars_response,
+  vars_stressor,
+  response_levels = NULL,
+  stressor_levels = NULL,
+  subpops = NULL,
+  siteID = NULL,
+  weight = "weight",
+  xcoord = NULL,
+  ycoord = NULL,
+  stratumID = NULL,
+  clusterID = NULL,
+  weight1 = NULL,
+  xcoord1 = NULL,
+  ycoord1 = NULL,
+  sizeweight = FALSE,
+  sweight = NULL,
+  sweight1 = NULL,
+  fpc = NULL,
+  popsize = NULL,
+  vartype = "Local",
+  conf = 95,
+  All_Sites = FALSE
+)
@@ -503,27 +503,27 @@

Author

Examples

-
dframe <- data.frame(
-  siteID = paste0("Site", 1:100),
-  wgt = runif(100, 10, 100),
-  xcoord = runif(100),
-  ycoord = runif(100),
-  stratum = rep(c("Stratum1", "Stratum2"), 50),
-  RespVar1 = sample(c("Poor", "Good"), 100, replace = TRUE),
-  RespVar2 = sample(c("Poor", "Good"), 100, replace = TRUE),
-  StressVar = sample(c("Poor", "Good"), 100, replace = TRUE),
-  All_Sites = rep("All Sites", 100),
-  Resource_Class = rep(c("Agr", "Forest"), c(55, 45))
-)
-myresponse <- c("RespVar1", "RespVar2")
-mystressor <- c("StressVar")
-mysubpops <- c("All_Sites", "Resource_Class")
-relrisk_analysis(dframe,
-  vars_response = myresponse,
-  vars_stressor = mystressor, subpops = mysubpops, siteID = "siteID",
-  weight = "wgt", xcoord = "xcoord", ycoord = "ycoord",
-  stratumID = "stratum"
-)
+    
dframe <- data.frame(
+  siteID = paste0("Site", 1:100),
+  wgt = runif(100, 10, 100),
+  xcoord = runif(100),
+  ycoord = runif(100),
+  stratum = rep(c("Stratum1", "Stratum2"), 50),
+  RespVar1 = sample(c("Poor", "Good"), 100, replace = TRUE),
+  RespVar2 = sample(c("Poor", "Good"), 100, replace = TRUE),
+  StressVar = sample(c("Poor", "Good"), 100, replace = TRUE),
+  All_Sites = rep("All Sites", 100),
+  Resource_Class = rep(c("Agr", "Forest"), c(55, 45))
+)
+myresponse <- c("RespVar1", "RespVar2")
+mystressor <- c("StressVar")
+mysubpops <- c("All_Sites", "Resource_Class")
+relrisk_analysis(dframe,
+  vars_response = myresponse,
+  vars_stressor = mystressor, subpops = mysubpops, siteID = "siteID",
+  weight = "wgt", xcoord = "xcoord", ycoord = "ycoord",
+  stratumID = "stratum"
+)
 #>             Type Subpopulation Response  Stressor nResp  Estimate Estimate_num
 #> 1      All_Sites     All Sites RespVar1 StressVar   100 0.7052158    0.4270103
 #> 2      All_Sites     All Sites RespVar2 StressVar   100 0.7917668    0.4033665
@@ -573,7 +573,7 @@ 

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/revisit_bibd.html b/docs/reference/revisit_bibd.html
index bd44b99..338eff9 100644
--- a/docs/reference/revisit_bibd.html
+++ b/docs/reference/revisit_bibd.html
@@ -22,7 +22,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -82,16 +82,16 @@

Create a balanced incomplete block panel revisit design

-
revisit_bibd(
-  n_period,
-  n_pnl,
-  n_visit,
-  nsamp,
-  panel_name = "BIB",
-  begin = 1,
-  skip = 1,
-  iter = 30
-)
+
revisit_bibd(
+  n_period,
+  n_pnl,
+  n_visit,
+  nsamp,
+  panel_name = "BIB",
+  begin = 1,
+  skip = 1,
+  iter = 30
+)
@@ -177,9 +177,9 @@

Author

Examples

-
# Balanced incomplete block design with 20 sample occasions, 20 panels,
-# 3 visits to each unit, and 20 units in each panel.
-revisit_bibd(n_period = 20, n_pnl = 20, n_visit = 3, nsamp = 20)
+    
# Balanced incomplete block design with 20 sample occasions, 20 panels,
+# 3 visits to each unit, and 20 units in each panel.
+revisit_bibd(n_period = 20, n_pnl = 20, n_visit = 3, nsamp = 20)
 #>         1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20
 #> BIB_01  0  0  0  0  0 20  0  0 20  0  0 20  0  0  0  0  0  0  0  0
 #> BIB_02 20  0  0  0  0  0 20 20  0  0  0  0  0  0  0  0  0  0  0  0
@@ -217,7 +217,7 @@ 

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/revisit_dsgn.html b/docs/reference/revisit_dsgn.html
index cfddfcf..ee09069 100644
--- a/docs/reference/revisit_dsgn.html
+++ b/docs/reference/revisit_dsgn.html
@@ -20,7 +20,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -78,7 +78,7 @@

Create a panel revisit design

-
revisit_dsgn(n_period, panels, begin = 1, skip = 1)
+
revisit_dsgn(n_period, panels, begin = 1, skip = 1)
@@ -226,26 +226,26 @@

Author

Examples

-
# One panel of  60 sample units sampled at every time period: [1-0]
-revisit_dsgn(20, panels = list(
-  Annual = list(
-    n = 60, pnl_dsgn = c(1, 0), pnl.n = NA,
-    start_option = "None"
-  )
-), begin = 1)
+    
# One panel of  60 sample units sampled at every time period: [1-0]
+revisit_dsgn(20, panels = list(
+  Annual = list(
+    n = 60, pnl_dsgn = c(1, 0), pnl.n = NA,
+    start_option = "None"
+  )
+), begin = 1)
 #>         1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20
 #> Annual 60 60 60 60 60 60 60 60 60 60 60 60 60 60 60 60 60 60 60 60
 #> attr(,"class")
 #> [1] "paneldesign"
-
-# Rotating panels of 60 units sampled once and never again: [1-n].  Number
-# of panels equal n_period.
-revisit_dsgn(20,
-  panels = list(
-    R60N = list(n = 60, pnl_dsgn = c(1, NA), pnl_n = NA, start_option = "None")
-  ),
-  begin = 1
-)
+
+# Rotating panels of 60 units sampled once and never again: [1-n].  Number
+# of panels equal n_period.
+revisit_dsgn(20,
+  panels = list(
+    R60N = list(n = 60, pnl_dsgn = c(1, NA), pnl_n = NA, start_option = "None")
+  ),
+  begin = 1
+)
 #>          1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20
 #> R60N_01 60  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
 #> R60N_02  0 60  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
@@ -269,15 +269,15 @@ 

Examples

#> R60N_20  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0 60
#> attr(,"class")
#> [1] "paneldesign"
-
-# Serially alternating panel with three visits to sample unit then skip
-# next two time periods: [3-2]
-revisit_dsgn(20, panels = list(
-  SA60PE = list(
-    n = 20, pnl_dsgn = c(3, 2), pnl_n = NA,
-    start_option = "Partial_End"
-  )
-), begin = 1)
+
+# Serially alternating panel with three visits to sample unit then skip
+# next two time periods: [3-2]
+revisit_dsgn(20, panels = list(
+  SA60PE = list(
+    n = 20, pnl_dsgn = c(3, 2), pnl_n = NA,
+    start_option = "Partial_End"
+  )
+), begin = 1)
#>           1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20
#> SA60PE_1 20 20 20  0  0 20 20 20  0  0 20 20 20  0  0 20 20 20  0  0
#> SA60PE_2  0 20 20 20  0  0 20 20 20  0  0 20 20 20  0  0 20 20 20  0
@@ -286,16 +286,17 @@

Examples

#> SA60PE_5 20 20  0  0 20 20 20  0  0 20 20 20  0  0 20 20 20  0  0 20
#> attr(,"class")
#> [1] "paneldesign"
-
-# Split panel of sample units combining above two panel designs: [1-0, 1-n]
-revisit_dsgn(n_period = 20, begin = 2017, panels = list(
-  Annual = list(
-    n = 60, pnl_dsgn = c(1, 0), pnl.n = NA,
-    start_option = "None"
-  ),
-  R60N = list(n = 60, pnl_dsgn = c(1, NA), pnl_n = NA, start_option = "None")
-))
+
+# Split panel of sample units combining above two panel designs: [1-0, 1-n]
+revisit_dsgn(n_period = 20, begin = 2017, panels = list(
+  Annual = list(
+    n = 60, pnl_dsgn = c(1, 0), pnl.n = NA,
+    start_option = "None"
+  ),
+  R60N = list(n = 60, pnl_dsgn = c(1, NA), pnl_n = NA, start_option = "None")
+))
#>         2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030
+#> Annual   60   60   60   60   60   60   60   60   60   60   60   60   60   60
#> R60N_01   60    0    0    0    0    0    0    0    0    0    0    0    0    0
#> R60N_02    0   60    0    0    0    0    0    0    0    0    0    0    0    0
#> R60N_03    0    0   60    0    0    0    0    0    0    0    0    0    0    0
@@ -317,6 +318,7 @@

Examples

#> R60N_19    0    0    0    0    0    0    0    0    0    0    0    0    0    0
#> R60N_20    0    0    0    0    0    0    0    0    0    0    0    0    0    0
#>         2031 2032 2033 2034 2035 2036
+#> Annual   60   60   60   60   60   60
#> R60N_01    0    0    0    0    0    0
#> R60N_02    0    0    0    0    0    0
#> R60N_03    0    0    0    0    0    0
@@ -353,7 +355,7 @@

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/revisit_rand.html b/docs/reference/revisit_rand.html
index 9c189ed..ed9bb4c 100644
--- a/docs/reference/revisit_rand.html
+++ b/docs/reference/revisit_rand.html
@@ -22,7 +22,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -82,16 +82,16 @@

Create a revisit design with random assignment to panels and time periods
-
revisit_rand(
-  n_period,
-  n_pnl,
-  rand_control = "period",
-  n_visit,
-  nsamp,
-  panel_name = "Random",
-  begin = 1,
-  skip = 1
-)
+
revisit_rand(
+  n_period,
+  n_pnl,
+  rand_control = "period",
+  n_visit,
+  nsamp,
+  panel_name = "Random",
+  begin = 1,
+  skip = 1
+)
@@ -176,10 +176,10 @@

Author

Examples

-
revisit_rand(
-  n_period = 20, n_pnl = 10, rand_control = "none", n_visit = 50,
-  nsamp = 20
-)
+    
revisit_rand(
+  n_period = 20, n_pnl = 10, rand_control = "none", n_visit = 50,
+  nsamp = 20
+)
 #>            1  2  3  4  5  6 7  8  9 10 11 12 13 14 15 16 17 18 19 20
 #> Random_01  0  0  0 20 20  0 0  0  0  0  0  0  0 20  0 20  0 20  0  0
 #> Random_02  0  0  0  0  0 20 0  0  0  0  0 20  0  0  0  0 20  0  0  0
@@ -193,10 +193,10 @@ 

Examples

#> Random_10  0  0 20  0  0  0  0  0  0 20  0 20  0 20  0 20  0 20  0  0
#> attr(,"class")
#> [1] "paneldesign"
-revisit_rand(
-  n_period = 20, n_pnl = 10, rand_control = "panel", n_visit = 5,
-  nsamp = 10
-)
+revisit_rand(
+  n_period = 20, n_pnl = 10, rand_control = "panel", n_visit = 5,
+  nsamp = 10
+)
#>            1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20
#> Random_01 10  0 10  0  0  0 10  0 10  0  0 10  0  0  0  0  0  0  0  0
#> Random_02  0  0  0  0  0  0  0 10  0 10  0  0 10 10 10  0  0  0  0  0
@@ -210,10 +210,10 @@

Examples

#> Random_10  0  0  0  0  0  0  0 10  0 10  0  0  0 10  0  0  0  0 10 10
#> attr(,"class")
#> [1] "paneldesign"
-revisit_rand(
-  n_period = 20, n_pnl = 10, rand_control = "period",
-  n_visit = 5, nsamp = 10
-)
+revisit_rand(
+  n_period = 20, n_pnl = 10, rand_control = "period",
+  n_visit = 5, nsamp = 10
+)
#>            1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20
#> Random_01  0 10  0 10  0  0 10 10 10  0  0 10 10 10 10 10 10 10  0 10
#> Random_02  0 10  0  0  0 10  0  0  0 10 10  0 10 10 10  0  0  0 10  0
@@ -241,7 +241,7 @@

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/sp_balance.html b/docs/reference/sp_balance.html
index 2dba24a..cedf22a 100644
--- a/docs/reference/sp_balance.html
+++ b/docs/reference/sp_balance.html
@@ -19,7 +19,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -76,14 +76,14 @@

Calculate spatial balance metrics

-
sp_balance(
-  object,
-  sframe,
-  stratum_var = NULL,
-  ip = NULL,
-  metrics = "pielou",
-  extents = FALSE
-)
+
sp_balance(
+  object,
+  sframe,
+  stratum_var = NULL,
+  ip = NULL,
+  metrics = "pielou",
+  extents = FALSE
+)
@@ -167,13 +167,13 @@

Author

Examples

-
if (FALSE) {
-sample <- grts(NE_Lakes, 30)
-sp_balance(sample$sites_base, NE_Lakes)
-strata_n <- c(low = 25, high = 30)
-sample_strat <- grts(NE_Lakes, n_base = strata_n, stratum_var = "ELEV_CAT")
-sp_balance(sample_strat$sites_base, NE_Lakes, stratum_var = "ELEV_CAT", metric = "rmse")
-}
+    
if (FALSE) {
+sample <- grts(NE_Lakes, 30)
+sp_balance(sample$sites_base, NE_Lakes)
+strata_n <- c(low = 25, high = 30)
+sample_strat <- grts(NE_Lakes, n_base = strata_n, stratum_var = "ELEV_CAT")
+sp_balance(sample_strat$sites_base, NE_Lakes, stratum_var = "ELEV_CAT", metric = "rmse")
+}
 
@@ -188,7 +188,7 @@

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/sp_frame.html b/docs/reference/sp_frame.html
index 9db45bd..c803e2b 100644
--- a/docs/reference/sp_frame.html
+++ b/docs/reference/sp_frame.html
@@ -18,7 +18,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -74,9 +74,9 @@

sp_frame objects

-
sp_frame(frame)
-
-sp_unframe(sp_frame)
+
sp_frame(frame)
+
+sp_unframe(sp_frame)
@@ -105,11 +105,11 @@

Details

Examples

-
NE_Lakes <- sp_frame(NE_Lakes)
-class(NE_Lakes)
+    
NE_Lakes <- sp_frame(NE_Lakes)
+class(NE_Lakes)
 #> [1] "sp_frame"   "sf"         "data.frame"
-NE_Lakes <- sp_unframe(NE_Lakes)
-class(NE_Lakes)
+NE_Lakes <- sp_unframe(NE_Lakes)
+class(NE_Lakes)
 #> [1] "sf"         "data.frame"
 
@@ -125,7 +125,7 @@

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/sp_plot.html b/docs/reference/sp_plot.html
index 3b1ce31..9bf265d 100644
--- a/docs/reference/sp_plot.html
+++ b/docs/reference/sp_plot.html
@@ -25,7 +25,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -81,43 +81,43 @@

Plot sampling frames, design sites, and analysis data.

are of the distributions of the right-hand side variables. If the left-hand
side of the formula contains a variable, plots are of the left-hand side
variable for each level of each right-hand side variable.
-This function is largely built on plot.sf(), and all spsurvey plotting
-methods can supply additional arguments to plot.sf(). For more information on
+This function is largely built on plot.sf(), and all spsurvey plotting
+methods can supply additional arguments to plot.sf(). For more information on
plotting in sf, run ?sf::plot.sf(). Equivalent to spsurvey::plot(); both are
currently maintained for backwards compatibility.

-
sp_plot(object, ...)
-
-# S3 method for default
-sp_plot(
-  object,
-  formula = ~1,
-  xcoord,
-  ycoord,
-  crs,
-  var_args = NULL,
-  varlevel_args = NULL,
-  geom = FALSE,
-  onlyshow = NULL,
-  fix_bbox = TRUE,
-  ...
-)
-
-# S3 method for sp_design
-sp_plot(
-  object,
-  sframe = NULL,
-  formula = ~siteuse,
-  siteuse = NULL,
-  var_args = NULL,
-  varlevel_args = NULL,
-  geom = FALSE,
-  onlyshow = NULL,
-  fix_bbox = TRUE,
-  ...
-)
+
sp_plot(object, ...)
+
+# S3 method for default
+sp_plot(
+  object,
+  formula = ~1,
+  xcoord,
+  ycoord,
+  crs,
+  var_args = NULL,
+  varlevel_args = NULL,
+  geom = FALSE,
+  onlyshow = NULL,
+  fix_bbox = TRUE,
+  ...
+)
+
+# S3 method for sp_design
+sp_plot(
+  object,
+  sframe = NULL,
+  formula = ~siteuse,
+  siteuse = NULL,
+  var_args = NULL,
+  varlevel_args = NULL,
+  geom = FALSE,
+  onlyshow = NULL,
+  fix_bbox = TRUE,
+  ...
+)
@@ -129,7 +129,7 @@

Arguments

...
-

Additional arguments to pass to plot.sf().

+

Additional arguments to pass to plot.sf().

formula
@@ -225,14 +225,14 @@

Author

Examples

-
if (FALSE) {
-data("NE_Lakes")
-sp_plot(NE_Lakes, formula = ~ELEV_CAT)
-sample <- grts(NE_Lakes, 30)
-sp_plot(sample, NE_Lakes)
-data("NLA_PNW")
-sp_plot(NLA_PNW, formula = ~BMMI)
-}
+    
if (FALSE) {
+data("NE_Lakes")
+sp_plot(NE_Lakes, formula = ~ELEV_CAT)
+sample <- grts(NE_Lakes, 30)
+sp_plot(sample, NE_Lakes)
+data("NLA_PNW")
+sp_plot(NLA_PNW, formula = ~BMMI)
+}
 
@@ -247,7 +247,7 @@

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/sp_rbind.html b/docs/reference/sp_rbind.html
index e5a2b21..b83d48e 100644
--- a/docs/reference/sp_rbind.html
+++ b/docs/reference/sp_rbind.html
@@ -21,7 +21,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -80,7 +80,7 @@

Combine rows from GRTS or IRS samples.

-
sp_rbind(object, siteuse = NULL)
+
sp_rbind(object, siteuse = NULL)
@@ -111,11 +111,11 @@

Author

Examples

-
if (FALSE) {
-sample <- grts(NE_Lakes, 50, n_over = 10)
-sample <- sp_rbind(sample)
-write_sf(sample, "mypath/sample.shp")
-}
+    
if (FALSE) {
+sample <- grts(NE_Lakes, 50, n_over = 10)
+sample <- sp_rbind(sample)
+write_sf(sample, "mypath/sample.shp")
+}
 
@@ -130,7 +130,7 @@

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/sp_summary.html b/docs/reference/sp_summary.html
index 6f4f2ca..902a4d6 100644
--- a/docs/reference/sp_summary.html
+++ b/docs/reference/sp_summary.html
@@ -23,7 +23,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -84,13 +84,13 @@

Summarize sampling frames, design sites, and analysis data.

-
sp_summary(object, ...)
-
-# S3 method for default
-sp_summary(object, formula = ~1, onlyshow = NULL, ...)
-
-# S3 method for sp_design
-sp_summary(object, formula = ~siteuse, siteuse = NULL, onlyshow = NULL, ...)
+
sp_summary(object, ...)
+
+# S3 method for default
+sp_summary(object, formula = ~1, onlyshow = NULL, ...)
+
+# S3 method for sp_design
+sp_summary(object, formula = ~siteuse, siteuse = NULL, onlyshow = NULL, ...)
@@ -156,13 +156,13 @@

Author

Examples

-
if (FALSE) {
-data("NE_Lakes")
-sp_summary(NE_Lakes, ELEV ~ 1)
-sp_summary(NE_Lakes, ~ ELEV_CAT * AREA_CAT)
-sample <- grts(NE_Lakes, 100)
-sp_summary(sample, ~ ELEV_CAT * AREA_CAT)
-}
+    
if (FALSE) {
+data("NE_Lakes")
+sp_summary(NE_Lakes, ELEV ~ 1)
+sp_summary(NE_Lakes, ~ ELEV_CAT * AREA_CAT)
+sample <- grts(NE_Lakes, 100)
+sp_summary(sample, ~ ELEV_CAT * AREA_CAT)
+}
 
@@ -177,7 +177,7 @@

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/spsurvey-package.html b/docs/reference/spsurvey-package.html
index 70b8be5..cf7e064 100644
--- a/docs/reference/spsurvey-package.html
+++ b/docs/reference/spsurvey-package.html
@@ -37,7 +37,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -141,7 +141,7 @@

Author

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/stopprnt.html b/docs/reference/stopprnt.html
index 48d8143..c568a30 100644
--- a/docs/reference/stopprnt.html
+++ b/docs/reference/stopprnt.html
@@ -18,7 +18,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -74,7 +74,7 @@

Print grts() and irs() errors.

-
stopprnt(stop_df = get("stop_df", envir = .GlobalEnv), m = 1:nrow(stop_df))
+
stopprnt(stop_df = get("stop_df", envir = .GlobalEnv), m = 1:nrow(stop_df))
@@ -114,7 +114,7 @@

Author

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/summary.html b/docs/reference/summary.html
index 6593700..2013966 100644
--- a/docs/reference/summary.html
+++ b/docs/reference/summary.html
@@ -23,7 +23,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -84,11 +84,11 @@

Summarize sampling frames, design sites, and analysis data.

-
# S3 method for sp_frame
-summary(object, formula = ~1, onlyshow = NULL, ...)
-
-# S3 method for sp_design
-summary(object, formula = ~siteuse, siteuse = NULL, onlyshow = NULL, ...)
+
# S3 method for sp_frame
+summary(object, formula = ~1, onlyshow = NULL, ...)
+
+# S3 method for sp_design
+summary(object, formula = ~siteuse, siteuse = NULL, onlyshow = NULL, ...)
@@ -155,13 +155,13 @@

Author

Examples

-
if (FALSE) {
-data("NE_Lakes")
-summary(NE_Lakes, ELEV ~ 1)
-summary(NE_Lakes, ~ ELEV_CAT * AREA_CAT)
-sample <- grts(NE_Lakes, 100)
-summary(sample, ~ ELEV_CAT * AREA_CAT)
-}
+    
if (FALSE) {
+data("NE_Lakes")
+summary(NE_Lakes, ELEV ~ 1)
+summary(NE_Lakes, ~ ELEV_CAT * AREA_CAT)
+sample <- grts(NE_Lakes, 100)
+summary(sample, ~ ELEV_CAT * AREA_CAT)
+}
 
@@ -176,7 +176,7 @@

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/trend_analysis.html b/docs/reference/trend_analysis.html
index 0d1a2e6..cbf6033 100644
--- a/docs/reference/trend_analysis.html
+++ b/docs/reference/trend_analysis.html
@@ -29,7 +29,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -96,37 +96,37 @@

Trend analysis

-
trend_analysis(
-  dframe,
-  vars_cat = NULL,
-  vars_cont = NULL,
-  subpops = NULL,
-  model_cat = "SLR",
-  cat_rhs = NULL,
-  model_cont = "LMM",
-  cont_rhs = NULL,
-  siteID = "siteID",
-  yearID = "year",
-  weight = "weight",
-  xcoord = NULL,
-  ycoord = NULL,
-  stratumID = NULL,
-  clusterID = NULL,
-  weight1 = NULL,
-  xcoord1 = NULL,
-  ycoord1 = NULL,
-  sizeweight = FALSE,
-  sweight = NULL,
-  sweight1 = NULL,
-  fpc = NULL,
-  popsize = NULL,
-  invprboot = TRUE,
-  nboot = 1000,
-  vartype = "Local",
-  jointprob = "overton",
-  conf = 95,
-  All_Sites = FALSE
-)
+
trend_analysis(
+  dframe,
+  vars_cat = NULL,
+  vars_cont = NULL,
+  subpops = NULL,
+  model_cat = "SLR",
+  cat_rhs = NULL,
+  model_cont = "LMM",
+  cont_rhs = NULL,
+  siteID = "siteID",
+  yearID = "year",
+  weight = "weight",
+  xcoord = NULL,
+  ycoord = NULL,
+  stratumID = NULL,
+  clusterID = NULL,
+  weight1 = NULL,
+  xcoord1 = NULL,
+  ycoord1 = NULL,
+  sizeweight = FALSE,
+  sweight = NULL,
+  sweight1 = NULL,
+  fpc = NULL,
+  popsize = NULL,
+  invprboot = TRUE,
+  nboot = 1000,
+  vartype = "Local",
+  jointprob = "overton",
+  conf = 95,
+  All_Sites = FALSE
+)
@@ -637,35 +637,35 @@

Author

Examples

-
# Example using a categorical variable with three resource classes and a
-# continuous variable
-mydframe <- data.frame(
-  siteID = rep(paste0("Site", 1:40), rep(5, 40)),
-  yearID = rep(seq(2000, 2020, by = 5), 40),
-  wgt = rep(runif(40, 10, 100), rep(5, 40)),
-  xcoord = rep(runif(40), rep(5, 40)),
-  ycoord = rep(runif(40), rep(5, 40)),
-  All_Sites = rep("All Sites", 200),
-  Region = sample(c("North", "South"), 200, replace = TRUE),
-  Resource_Class = sample(c("Good", "Fair", "Poor"), 200, replace = TRUE),
-  ContVar = rnorm(200, 10, 1)
-)
-myvars_cat <- c("Resource_Class")
-myvars_cont <- c("ContVar")
-mysubpops <- c("All_Sites", "Region")
-trend_analysis(
-  dframe = mydframe,
-  vars_cat = myvars_cat,
-  vars_cont = myvars_cont,
-  subpops = mysubpops,
-  model_cat = "WLR",
-  model_cont = "SLR",
-  siteID = "siteID",
-  yearID = "yearID",
-  weight = "wgt",
-  xcoord = "xcoord",
-  ycoord = "ycoord"
-)
+    
# Example using a categorical variable with three resource classes and a
+# continuous variable
+mydframe <- data.frame(
+  siteID = rep(paste0("Site", 1:40), rep(5, 40)),
+  yearID = rep(seq(2000, 2020, by = 5), 40),
+  wgt = rep(runif(40, 10, 100), rep(5, 40)),
+  xcoord = rep(runif(40), rep(5, 40)),
+  ycoord = rep(runif(40), rep(5, 40)),
+  All_Sites = rep("All Sites", 200),
+  Region = sample(c("North", "South"), 200, replace = TRUE),
+  Resource_Class = sample(c("Good", "Fair", "Poor"), 200, replace = TRUE),
+  ContVar = rnorm(200, 10, 1)
+)
+myvars_cat <- c("Resource_Class")
+myvars_cont <- c("ContVar")
+mysubpops <- c("All_Sites", "Region")
+trend_analysis(
+  dframe = mydframe,
+  vars_cat = myvars_cat,
+  vars_cont = myvars_cont,
+  subpops = mysubpops,
+  model_cat = "WLR",
+  model_cont = "SLR",
+  siteID = "siteID",
+  yearID = "yearID",
+  weight = "wgt",
+  xcoord = "xcoord",
+  ycoord = "ycoord"
+)
 #> $catsum
 #>        Type Subpopulation      Indicator Category Trend_Estimate
 #> 1 All_Sites     All Sites Resource_Class     Fair     0.04523685
@@ -740,7 +740,7 @@ 

Examples

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.

diff --git a/docs/reference/warnprnt.html b/docs/reference/warnprnt.html
index dcca243..e974210 100644
--- a/docs/reference/warnprnt.html
+++ b/docs/reference/warnprnt.html
@@ -18,7 +18,7 @@
 spsurvey
-  5.5.0
+  5.5.1
@@ -74,7 +74,7 @@

Print grts(), irs()), and analysis function warnings

-
warnprnt(warn_df = get("warn_df", envir = .GlobalEnv), m = 1:nrow(warn_df))
+
warnprnt(warn_df = get("warn_df", envir = .GlobalEnv), m = 1:nrow(warn_df))
@@ -114,7 +114,7 @@

Author

-

Site built with pkgdown 2.0.6.

+

Site built with pkgdown 2.0.7.