#' This is one of the main functions. It performs cross-validation on ahm models to select the optimal setting of the hyperparameter h
#'
#' @param x data.frame Note the column names of x should be in the order of major components then minor components; no interactions between major or minor components are needed.
#' @param y numeric vector
#' @param metric "mse" or "AICc", the metric used in cross-validation, where the minimum is selected as the optimum
#' @param num_major number of major components
#' @param dist_minor the allocation of the number of minor components nested under the major components
#' @param type heredity type; weak heredity is the currently supported type
#' @param lambda_seq a numeric vector of candidate lambda values used in the ridge regression for estimating the initial values
#' @param mapping_type the form of the coefficient function of the major components in front of the corresponding minor terms. Currently only "power" is supported
#' @param rep_gcv the number of candidate tuning-parameter values used in the GCV selection
#' @param powerh_path if NULL, the default is the vector round(seq(0.001,2,length.out =15),3)
#' @param nfolds used in cv.glmnet for the initial values of the parameters in the non-negative garrote method
#' @param alpha 0 is for the ridge in glmnet, see https://web.stanford.edu/~hastie/glmnet/glmnet_alpha.html
#' @return Return a list
#' @export
#' @examples
#' \donttest{
#' data("pringles_fat")
#' data_fat = pringles_fat
#' h_tmp = 1.3
#' x = data_fat[,c("c1","c2","c3","x11","x12","x21","x22")]
#' y = data_fat[,1]
#' powerh_path = round(seq(0.001,2,length.out =15),3)
#' num_major = 3; dist_minor = c(2,2,1)
#' res = cv.ahm (y, x, powerh_path=powerh_path, metric = "mse", num_major, dist_minor, type = "weak"
#'               , alpha=0, lambda_seq=seq(0,5,0.01), nfolds=NULL, mapping_type = c("power"), rep_gcv=100)
#' object = res$metric_mse
#' }
cv.ahm = function(y, x, powerh_path=NULL, metric = c("mse","AICc"), num_major = 3, dist_minor = c(2,2,1),
                  type = "weak", alpha=0, lambda_seq=seq(0,5,0.01), nfolds=NULL,
                  mapping_type = c("power"), rep_gcv=100){
  # Store the original y, and x with interactions augmented
  x_orig = x
  x_aug_orig = augment_df(x, num_major, dist_minor)
  y_orig = y

  # Cross-validation: in the study we use leave-one-out cross-validation,
  # looping over the sequence of h values
  if (is.null(powerh_path)) powerh_path = round(seq(0.001, 2, length.out = 15), 3)
  res_aicc = res_mse = vector(mode = "list", length = length(powerh_path))
  for (ll in 1:length(powerh_path)) {
    h_tmp = powerh_path[ll]
    # Add the mapping function in front of the minor components
    x_tmp = mapping_function(x, num_major, dist_minor, mapping_type, powerh = h_tmp)
    x_aug0 = augment_df(x = x_tmp, num_major, dist_minor)

    # The following code is modified from the package oem
    cv.nfolds = nrow(x)
    foldid = sample(rep(seq(cv.nfolds), length = nrow(x)))
    outlist = as.list(seq(cv.nfolds))
    aicc_path = mscv_path = c()
    for (i in seq(cv.nfolds)) {
      which = foldid == i
      if (is.matrix(y)) {
        y_sub = y[!which, ]
      } else {
        y_sub = y[!which]
      }
      outlist[[i]] = ahm(y_sub, x[!which, ], num_major, dist_minor, type, alpha,
                         lambda_seq, nfolds, mapping_type, powerh = h_tmp, rep_gcv)
      aicc_path = c(aicc_path, outlist[[i]]$aicc)
      pred = matrix(x_aug0[which, ], ncol = ncol(x_aug0)) %*% outlist[[i]]$beta_nng
      mscv = drop(base::crossprod(pred - y[which]))
      mscv_path = c(mscv_path, mscv) # currently only the mse metric is supported
    }
    res_mse[[ll]] = mscv_path
    res_aicc[[ll]] = aicc_path
  }

  sel_id_mse = which.min(base::sapply(res_mse, mean))
  sel_id_aicc = which.min(base::sapply(res_aicc, mean)) # base::sapply(res, sd)
  h_tmp_mse = powerh_path[sel_id_mse]
  h_tmp_aicc = powerh_path[sel_id_aicc]

  cvlist_mse = list(cvm = base::sapply(res_mse, mean), cvsd = base::sapply(res_mse, sd),
                    metric = "mse", cvmin = min(base::sapply(res_mse, mean)),
                    cvmedian = base::sapply(res_mse, median),
                    cvmedian_min = min(base::sapply(res_mse, median)),
                    h_optim = h_tmp_mse, powerh_path = powerh_path, cv.nfolds = cv.nfolds)
  cvlist_aicc = list(cvm = base::sapply(res_aicc, mean), cvsd = base::sapply(res_aicc, sd),
                     metric = "aicc", cvmin = min(base::sapply(res_aicc, mean)),
                     cvmedian = base::sapply(res_mse, median),
                     cvmedian_min = min(base::sapply(res_mse, median)),
                     h_optim = h_tmp_aicc, powerh_path = powerh_path, cv.nfolds = cv.nfolds)

  # After selecting the optimal h, run ahm on the whole data set
  out = vector(mode = "list", length = 2) # mse and aicc
  names(out) = c("metric_mse", "metric_aicc")
  tmp = ahm(y, x, num_major, dist_minor, type, alpha, lambda_seq, nfolds,
            mapping_type, powerh = h_tmp_mse, rep_gcv)
  out[[1]] = c(cvlist_mse, tmp)
  tmp = ahm(y, x, num_major, dist_minor, type, alpha, lambda_seq, nfolds,
            mapping_type, powerh = h_tmp_aicc, rep_gcv)
  out[[2]] = c(cvlist_aicc, tmp)
  class(out) = "cv.ahm"
  return(out)
}

#' Coefficient method for the fitted cv.ahm object
#'
#' @param object cv.ahm object
#' @param metric "mse" or "aicc"
#' @param ... not used
#' @return a numerical vector
#' @export
#' @examples
#' \donttest{
#' data("pringles_fat")
#' data_fat = pringles_fat
#' h_tmp = 1.3
#' x = data_fat[,c("c1","c2","c3","x11","x12","x21","x22")]
#' y = data_fat[,1]
#' powerh_path = round(seq(0.001,2,length.out =15),3)
#' num_major = 3; dist_minor = c(2,2,1)
#' res = cv.ahm (y, x, powerh_path=powerh_path, metric = "mse", num_major, dist_minor, type = "weak"
#'               , alpha=0, lambda_seq=seq(0,5,0.01), nfolds=NULL, mapping_type = c("power"), rep_gcv=100)
#' coefficients = coef(res)
#' }
coef.cv.ahm = function(object, metric = "mse", ...) {
  if (0) {
    #object = res
    #coef.cv.ahm(object = res, metric = "mse")
    #coef.cv.ahm(object = res, metric = "aicc")
    #predict.cv.ahm(object = res, metric = "aicc")
  }
  if (metric == "mse") {
    obj = object$metric_mse
  } else if (metric == "aicc"){
    obj = object$metric_aicc
  }
  class(obj) = "ahm"
  coeff = coef(obj)
  return (coeff)
}

#' Predict method for the fitted cv.ahm object
#'
#' @param object cv.ahm object
#' @param newx Matrix of new values for x at which predictions are to be made
#' @param metric "mse" or "aicc"
#' @param ... not used
#' @return Return a list
#' @export
#' @examples
#' \donttest{
#' data("pringles_fat")
#' data_fat = pringles_fat
#' h_tmp = 1.3
#' x = data_fat[,c("c1","c2","c3","x11","x12","x21","x22")]
#' y = data_fat[,1]
#' powerh_path = round(seq(0.001,2,length.out =15),3)
#' num_major = 3; dist_minor = c(2,2,1)
#' res = cv.ahm (y, x, powerh_path=powerh_path, metric = "mse", num_major, dist_minor, type = "weak"
#'               , alpha=0, lambda_seq=seq(0,5,0.01), nfolds=NULL, mapping_type = c("power"), rep_gcv=100)
#' pred = predict(res, x)
#' }
predict.cv.ahm = function(object, newx, metric = "mse", ...) {
  if (metric == "mse") {
    obj = object$metric_mse
  } else if (metric == "aicc"){
    obj = object$metric_aicc
  }
  class(obj) = "ahm"
  out = predict(obj, newx)
  return (out)
}
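# Illustrative sketch (not part of the package source): one way to inspect the
# cross-validation summary that cv.ahm() returns. The element names (cvm,
# h_optim, powerh_path) come from the 'cvlist_mse' constructed above; 'res' is
# assumed to be a fitted object such as the one in the @examples block.
if (0) {
  obj <- res$metric_mse
  plot(obj$powerh_path, obj$cvm, type = "b",
       xlab = "hyperparameter h", ylab = "LOOCV mean squared error")
  abline(v = obj$h_optim, lty = 2)  # the h selected by cv.ahm()
}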
/scratch/gouwar.j/cran-all/cranData/AHM/R/cv_ahm.R
#' Design points for the simplex centroid design with 3 components
#'
#' @docType data
#'
#' @usage data(design_simplex_centroid_design_3_major_component)
#'
#' @format data.frame
#'
#' @keywords datasets
#'
#' @examples
#' data(design_simplex_centroid_design_3_major_component)
#' \donttest{print(design_simplex_centroid_design_3_major_component)}
"design_simplex_centroid_design_3_major_component"
data(design_simplex_centroid_design_3_major_component, envir=environment())
/scratch/gouwar.j/cran-all/cranData/AHM/R/design_simplex_centroid_design_3_major_component-data.R
#' The mymaximin function generates the matrix of maximin design points.
#' It uses the simplex centroid design as the base design, then in a stochastic
#' way samples the candidate design points generated by the function partition.
#'
#' @param pool matrix, the candidate design points (e.g., from partitions::compositions) provided to the function
#' @param n numeric, sample size
#' @param m numeric, 3 stands for 3 components, i.e. c1, c2, and c3
#' @param iter numeric, iterations used in the stochastic sampling
#' @param Xorig matrix, initial design points
#' @description {This method is modified based on Prof. Bobby Gramacy's Computer Experiment lecture at Virginia Tech. \href{http://bobby.gramacy.com/teaching/}{Prof. Gramacy's lecture website}}
#' @return Return a matrix of the design points for the major components
#' @export
#' @examples
#' \donttest{
#' # The case of unconstrained experiments
#' library(mixexp)
#' num_size = 8 # num points in the design for the major component
#' Xorig = as.matrix(SCD(3))
#' # all possible combinations sum to 1
#' pool_3d = partitions::compositions(1000, 3, include.zero = TRUE)/1000
#' res_C = mymaximin(pool=pool_3d, n=num_size, m=3, iter=1e5, Xorig=Xorig)
#' DesignPoints(res_C, cornerlabs = c("c3","c2","c1"), axislabs=c("c1","c2","c3"))
#'
#' # The case of constrained experiments
#' library(mixexp)
#' num_size = 8 # num points in the design for the major component
#' # all possible combinations sum to 1
#' pool_3d = partitions::compositions(1000, 3, include.zero = TRUE)/1000
#' c1_min=0.2
#' c1_max=0.45
#' c2_min=0.4
#' c2_max=0.6
#' c3_min=0.1
#' c3_max=0.25
#' tmp = Xvert(nfac=3, lc=c(c1_min,c2_min,c3_min), uc=c(c1_max,c2_max,c3_max), ndm=1, pseudo=FALSE)
#' Xorig = tmp[c(1:6,13), c(1:3)]
#' colnames(Xorig) = c("V1","V2","V3")
#' pool_3d = t(dplyr::filter(as.data.frame(t(as.matrix(pool_3d))),
#'                           t(pool_3d)[,1] > c1_min &
#'                           t(pool_3d)[,1] <= c1_max &
#'                           t(pool_3d)[,2] > c2_min &
#'                           t(pool_3d)[,2] <= c2_max &
#'                           t(pool_3d)[,3] > c3_min &
#'                           t(pool_3d)[,3] <= c3_max
#'                           )
#'             )
#' res_C = mymaximin(pool=pool_3d, n=num_size, m=3, iter=1e5, Xorig=Xorig)
#' DesignPoints(res_C, cornerlabs = c("c3","c2","c1"), axislabs=c("c1","c2","c3")
#'              , x1lower=c1_min, x2lower=c2_min, x3lower=c3_min
#'              , x1upper=c1_max, x2upper=c2_max, x3upper=c3_max, pseudo=FALSE)
#' }
mymaximin <- function(pool, n=50, m=3, iter=1e5, Xorig=NULL) {
  # n is the sample size (number of design points to pick)
  # m is the number of components, e.g. 3 for c1, c2, c3
  # pool is the candidate set from the partition results
  # library(plgp, quietly=TRUE)

  if (is.null(Xorig)) { # use Xorig to include certain points
    n_size = n
  } else {
    n_size = n - nrow(Xorig)
  }
  init_id = sample(c(1:dim(pool)[2]), size = n_size, replace = FALSE)
  X = t(pool[,init_id])
  X <- rbind(X, Xorig) # combine initial base and randomly picked rows

  # plgp::distance() returns squared distances with a zero diagonal, so the
  # minimum must be taken over the off-diagonal (upper triangle) entries only.
  d <- plgp::distance(X)
  d <- as.numeric(d[upper.tri(d)])
  md <- min(d)
  for(t in 1:iter) {
    row <- sample(1:n, 1)
    xold <- X[row,] # random row selection
    X[row,] <- pool[,sample(c(1:dim(pool)[2]), size = 1, replace = FALSE)] # random new row
    dprime <- plgp::distance(X)
    dprime <- as.numeric(dprime[upper.tri(dprime)])
    mdprime <- min(dprime)
    if(mdprime > md) {
      md <- mdprime    # accept
    } else {
      X[row,] <- xold  # reject
    }
  }
  rownames(X) = c(1:nrow(X))
  return(X) # design matrix
}
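# Illustrative sketch (not package code): the maximin criterion that the swap
# loop above tries to increase is the smallest pairwise distance within the
# design. plgp::distance() returns squared Euclidean distances with a zero
# diagonal, hence the restriction to the upper triangle.
if (0) {
  X <- matrix(runif(8 * 3), ncol = 3)  # a random 8-point design in 3 components
  d <- plgp::distance(X)
  min(d[upper.tri(d)])                 # the quantity mymaximin() maximizes
}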
/scratch/gouwar.j/cran-all/cranData/AHM/R/maximin_mixexp.R
#' The candidate search points in the nonlinear optimization for the optimal value
#' in the Pringles experiment
#'
#' @docType data
#'
#' @usage data(pringles_candidates2search)
#'
#' @format matrix
#'
#' @keywords datasets
#'
#' @examples
#' data(pringles_candidates2search)
#' \donttest{print(pringles_candidates2search)}
"pringles_candidates2search"
data(pringles_candidates2search, envir=environment())
/scratch/gouwar.j/cran-all/cranData/AHM/R/pringles_candidates2search-data.R
#' Pringles experiment data set with the percent of Fat as the response
#'
#' @docType data
#'
#' @usage data(pringles_fat)
#'
#' @format data.frame
#'
#' @keywords datasets
#'
#' @references Kang, L., Joseph, V.R. and Brenneman, W.A. (2011). Design and modeling
#' strategies for mixture-of-mixtures experiments. \emph{Technometrics},
#' 53(2), 125--36.
#' (\href{https://www.tandfonline.com/doi/abs/10.1198/TECH.2011.08132}{tandfonline})
#'
#' @examples
#' data(pringles_fat)
#' \donttest{print(pringles_fat)}
"pringles_fat"
data(pringles_fat, envir=environment())
/scratch/gouwar.j/cran-all/cranData/AHM/R/pringles_fat-data.R
#' Pringles experiment data set with the Hardness as the response
#'
#' @docType data
#'
#' @usage data(pringles_hardness)
#'
#' @format data.frame
#'
#' @keywords datasets
#'
#' @references Kang, L., Joseph, V.R. and Brenneman, W.A. (2011). Design and modeling
#' strategies for mixture-of-mixtures experiments. \emph{Technometrics},
#' 53(2), 125--36.
#' (\href{https://www.tandfonline.com/doi/abs/10.1198/TECH.2011.08132}{tandfonline})
#'
#' @examples
#' data(pringles_hardness)
#' \donttest{print(pringles_hardness)}
"pringles_hardness"
data(pringles_hardness, envir=environment())
/scratch/gouwar.j/cran-all/cranData/AHM/R/pringles_hardness-data.R
## ---- echo = F, eval = TRUE, message = F, error = F----------------------
library(AHM)
library(mixexp)
if (0) {
  library(devtools); load_all()
}

## ---- echo = TRUE, eval = T----------------------------------------------
data("coating")
dat = coating
h_tmp = 1.1
x = dat[,c("c1","c2","x11","x12","x21","x22")]
y = dat[,ncol(dat)]
ptm <- proc.time()
out = ahm (y, x, num_major = 2, dist_minor = c(2,2),
           type = "weak", alpha=0, lambda_seq=seq(0,5,0.01), nfold = NULL,
           mapping_type = c("power"), powerh = h_tmp, rep_gcv=100)
proc.time() - ptm
summary(out)

## ---- echo = TRUE, eval = FALSE------------------------------------------
# powerh_path = round(seq(0.001,2,length.out =15),3)
#
# res = cv.ahm (y, x, powerh_path=powerh_path, metric = "mse", num_major=2, dist_minor=c(2,2),
#               type = "weak", alpha=0, lambda_seq=seq(0,5,0.01), nfolds=NULL,
#               mapping_type = c("power"), rep_gcv=100)
#
# object = res$metric_mse

## ---- echo = TRUE, eval = FALSE------------------------------------------
# data("pringles_fat")
# data_fat = pringles_fat
# h_tmp = 1.3
#
# x = data_fat[,c("c1","c2","c3","x11","x12","x21","x22")]
# y = data_fat[,1]
# ptm <- proc.time()
# out = ahm (y, x, num_major = 3, dist_minor = c(2,2,1),
#            type = "weak", alpha=0, lambda_seq=seq(0,5,0.01), nfold = NULL,
#            mapping_type = c("power"), powerh = h_tmp,
#            rep_gcv=100)
# proc.time() - ptm

## ---- echo = TRUE, eval = FALSE------------------------------------------
# summary(out)
#
# coefficients = coef(out)
# fitted = predict(out, x)

## ---- echo = TRUE, eval = FALSE------------------------------------------
# data("pringles_hardness")
# dat = pringles_hardness
# h_tmp = 1.3
#
# x = dat[,c("c1","c2","c3","x11","x12","x21","x22")]
# y = dat[,1]
# ptm <- proc.time()
# out = ahm (y, x, num_major = 3, dist_minor = c(2,2,1),
#            type = "weak", alpha=0, lambda_seq=seq(0,5,0.01), nfold = NULL,
#            mapping_type = c("power"), powerh = h_tmp,
#            rep_gcv=100)
# proc.time() - ptm
# summary(out)
/scratch/gouwar.j/cran-all/cranData/AHM/inst/doc/Introduction_to_the_AHM_package.R
---
title: "Introduction to the AHM Package"
author: |
  | Sumin Shen, Lulu Kang, and Xinwei Deng
fontsize: 11 pt
fig_caption: yes
fig_height: 6
fig_width: 6
header-includes:
- \usepackage{bm}
- \usepackage{pdfpages}
- \usepackage{amsmath,amssymb,latexsym,graphics,subfigure}
- \usepackage{empheq}
- \usepackage{booktabs}
- \usepackage{longtable}
- \usepackage{array}
- \usepackage{adjustbox}
- \usepackage{caption}
- \usepackage{algorithm}
- \usepackage{algcompatible}
- \usepackage{mathtools}
- \usepackage{multirow,ulem,bm}
- \usepackage[dvipdf]{epsfig}
- \usepackage{multirow}
- \usepackage[table]{xcolor}
- \usepackage{wrapfig}
- \usepackage{float}
- \usepackage{colortbl}
- \usepackage{pdflscape}
- \usepackage{tabu}
- \usepackage{threeparttable}
- \usepackage{graphicx}
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Introduction to the AHM package}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
---

# Abstract

We present a short tutorial on the package AHM, which implements the additive heredity model discussed in the 2019 paper "Additive Heredity Model for the Analysis of Mixture-of-Mixtures Experiments".

**Key Words**: Additive Heredity Model; Mixture-of-Mixtures Experiments; Nonnegative Garrote Method.

# 1. Introduction

The purpose of this package is to provide a solution for mixture-of-mixtures (MoM) experiments. In a mixture-of-mixtures experiment, the mixture components are called the major components and can be made up of sub-components. The sub-components within the major components are called the minor components.

Assume that there are \(q\) major components, and let \(c_k\) be the proportion of the \(k\)th major component. Then
\[
\begin{aligned}
\sum_{k=1}^{q} c_{k} = 1, \quad 0 \le c_{k} \le 1, \quad k = 1, \ldots, q.
\end{aligned}
\]
Moreover, each major component is composed of \(m_k\) minor components, whose proportions with respect to \(c_{k}\) are \(x_{kl}\), such that
\[
\begin{aligned}
\sum_{l=1}^{m_{k}} x_{kl} = 1, \quad 0 \le x_{kl} \le 1, \quad l = 1, \ldots, m_{k}.
\end{aligned}
\]

The idea is to address this problem with the additive heredity model (AHM). More details about the method are available in the paper "Additive Heredity Model for the Analysis of Mixture-of-Mixtures Experiments".

The package has two main functions, ahm and cv.ahm. The function ahm fits the additive heredity model given the design points. The function cv.ahm selects an optimal hyperparameter $h$ for the AHM via cross-validation and reports the model fitting results based on that optimal $h$.

This vignette is intended to get new users started quickly with the AHM package for fitting the additive heredity model to mixture-of-mixtures experiments. Section 2 gives short code snippets showing how to use the package for the case studies in the paper.

# 2. AHM on Real-Data Analysis

```{r, echo = F, eval = TRUE, message = F, error = F}
library(AHM)
library(mixexp)
if (0) {
  library(devtools); load_all()
}
```

### Photoresist-Coating Experiment

The objective of the photoresist-coating experiment is to determine the effect of the proportions of base resins in the formulation on a characteristic of interest of the photoresist material (Cornell and Ramsey 1998). The major component is defined as the base resin type, and the minor components are defined as the minor resins possessing different dissolution rates (slow and fast). There are two major components, $c_{1}$ and $c_{2}$, which are composed of two minor components each: $x_{11}$, $x_{12}$, and $x_{21}$, $x_{22}$, respectively. The range of values of both major and minor components is [0, 1]. In the experiment, the two major component proportions are ($c_{1}$, $c_{2}$) = (0.75, 0.25), (0.5, 0.5), and (0.25, 0.75). The two minor component proportions are ($x_{i1}$, $x_{i2}$) = (1, 0), (0.5, 0.5), and (0, 1), where $i = 1, 2$. There are in total 42 measured responses at 27 design points. Measurements were replicated twice at the design points where the products of the minor component proportions, $x_{11}x_{12}$ and $x_{21}x_{22}$, are both nonzero. The real data are included in the R package.

```{r, echo = TRUE, eval = T}
data("coating")
dat = coating
h_tmp = 1.1
x = dat[,c("c1","c2","x11","x12","x21","x22")]
y = dat[,ncol(dat)]
ptm <- proc.time()
out = ahm (y, x, num_major = 2, dist_minor = c(2,2),
           type = "weak", alpha=0, lambda_seq=seq(0,5,0.01), nfold = NULL,
           mapping_type = c("power"), powerh = h_tmp, rep_gcv=100)
proc.time() - ptm
summary(out)
```

Use the function cv.ahm to find the optimal value of the hyperparameter $h$.

```{r, echo = TRUE, eval = FALSE}
powerh_path = round(seq(0.001,2,length.out =15),3)

res = cv.ahm (y, x, powerh_path=powerh_path, metric = "mse", num_major=2, dist_minor=c(2,2),
              type = "weak", alpha=0, lambda_seq=seq(0,5,0.01), nfolds=NULL,
              mapping_type = c("power"), rep_gcv=100)

object = res$metric_mse
```

### Pringles Experiment

In this section, we analyze the Pringles experiment (Kang et al. 2011), whose goal is to develop a new kind of Pringles potato crisp such that the percentage of fat and the hardness of the potato crisps are optimized. There are three major components, \(c_{1}\), \(c_{2}\), and \(c_{3}\), among which \(c_{1}\) and \(c_{2}\) are composed of two minor components each: \(x_{11}\), \(x_{12}\), and \(x_{21}\), \(x_{22}\), respectively. The major component \(c_{3}\) is a pure material. The constraints on the components are given by
\[
\begin{aligned}
c_{1}+c_{2}+c_{3}=1, ~~& 0.601 \le c_{1} \le 0.643, \\
0.34 \le c_{2} \le 0.38, ~~& 0.017 \le c_{3} \le 0.019, \\
x_{11}+x_{12} = 1, ~~& x_{21} + x_{22} = 1, \\
0.835 \le x_{11} \le 0.905, ~~& 0.095 \le x_{12} \le 0.165, \\
0.9 \le x_{21} \le 0.98, ~~& 0.02 \le x_{22} \le 0.1.
\end{aligned}
\]
The design points are obtained from a major-minor crossed design. The responses are "Hardness" and "\%Fat". The real data are included in the R package.

- The response "%Fat"

```{r, echo = TRUE, eval = FALSE}
data("pringles_fat")
data_fat = pringles_fat
h_tmp = 1.3

x = data_fat[,c("c1","c2","c3","x11","x12","x21","x22")]
y = data_fat[,1]
ptm <- proc.time()
out = ahm (y, x, num_major = 3, dist_minor = c(2,2,1),
           type = "weak", alpha=0, lambda_seq=seq(0,5,0.01), nfold = NULL,
           mapping_type = c("power"), powerh = h_tmp,
           rep_gcv=100)
proc.time() - ptm
```

The common functions such as summary, coef, and predict are available for the fitted object.

```{r, echo = TRUE, eval = FALSE}
summary(out)

coefficients = coef(out)
fitted = predict(out, x)
```

- The response "Hardness"

```{r, echo = TRUE, eval = FALSE}
data("pringles_hardness")
dat = pringles_hardness
h_tmp = 1.3

x = dat[,c("c1","c2","c3","x11","x12","x21","x22")]
y = dat[,1]
ptm <- proc.time()
out = ahm (y, x, num_major = 3, dist_minor = c(2,2,1),
           type = "weak", alpha=0, lambda_seq=seq(0,5,0.01), nfold = NULL,
           mapping_type = c("power"), powerh = h_tmp,
           rep_gcv=100)
proc.time() - ptm
summary(out)
```

## References

- Cornell, J.A. and Ramsey, P.J. (1998). A generalized mixture model for categorized-components problems with an application to a photoresist-coating experiment. \emph{Technometrics}, 40(1), 48-61.
- Kang, L., Joseph, V.R. and Brenneman, W.A. (2011). Design and modeling strategies for mixture-of-mixtures experiments. \emph{Technometrics}, 53(2), 125-136.
- Lawson, J. and Willden, C. (2016). Mixture experiments in R using mixexp. \emph{Journal of Statistical Software}, 72(c02).
- Shen, S., Kang, L., and Deng, X. (2019). Additive heredity model for the analysis of mixture-of-mixtures experiments. \emph{Technometrics}, in press.
/scratch/gouwar.j/cran-all/cranData/AHM/inst/doc/Introduction_to_the_AHM_package.Rmd
---
title: "Introduction to the AHM Package"
author: |
  | Sumin Shen, Lulu Kang, and Xinwei Deng
fontsize: 11 pt
fig_caption: yes
fig_height: 6
fig_width: 6
header-includes:
- \usepackage{bm}
- \usepackage{pdfpages}
- \usepackage{amsmath,amssymb,latexsym,graphics,subfigure}
- \usepackage{empheq}
- \usepackage{booktabs}
- \usepackage{longtable}
- \usepackage{array}
- \usepackage{adjustbox}
- \usepackage{caption}
- \usepackage{algorithm}
- \usepackage{algcompatible}
- \usepackage{mathtools}
- \usepackage{multirow,ulem,bm}
- \usepackage[dvipdf]{epsfig}
- \usepackage{multirow}
- \usepackage[table]{xcolor}
- \usepackage{wrapfig}
- \usepackage{float}
- \usepackage{colortbl}
- \usepackage{pdflscape}
- \usepackage{tabu}
- \usepackage{threeparttable}
- \usepackage{graphicx}
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Introduction to the AHM package}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
---

# Abstract

We present a short tutorial on the package AHM, which implements the additive heredity model discussed in the 2019 paper "Additive Heredity Model for the Analysis of Mixture-of-Mixtures Experiments".

**Key Words**: Additive Heredity Model; Mixture-of-Mixtures Experiments; Nonnegative Garrote Method.

# 1. Introduction

The purpose of this package is to provide a solution for mixture-of-mixtures (MoM) experiments. In a mixture-of-mixtures experiment, the mixture components are called the major components and can be made up of sub-components. The sub-components within the major components are called the minor components.

Assume that there are \(q\) major components, and let \(c_k\) be the proportion of the \(k\)th major component. Then
\[
\begin{aligned}
\sum_{k=1}^{q} c_{k} = 1, \quad 0 \le c_{k} \le 1, \quad k = 1, \ldots, q.
\end{aligned}
\]
Moreover, each major component is composed of \(m_k\) minor components, whose proportions with respect to \(c_{k}\) are \(x_{kl}\), such that
\[
\begin{aligned}
\sum_{l=1}^{m_{k}} x_{kl} = 1, \quad 0 \le x_{kl} \le 1, \quad l = 1, \ldots, m_{k}.
\end{aligned}
\]

The idea is to address this problem with the additive heredity model (AHM). More details about the method are available in the paper "Additive Heredity Model for the Analysis of Mixture-of-Mixtures Experiments".

The package has two main functions, ahm and cv.ahm. The function ahm fits the additive heredity model given the design points. The function cv.ahm selects an optimal hyperparameter $h$ for the AHM via cross-validation and reports the model fitting results based on that optimal $h$.

This vignette is intended to get new users started quickly with the AHM package for fitting the additive heredity model to mixture-of-mixtures experiments. Section 2 gives short code snippets showing how to use the package for the case studies in the paper.

# 2. AHM on Real-Data Analysis

```{r, echo = F, eval = TRUE, message = F, error = F}
library(AHM)
library(mixexp)
if (0) {
  library(devtools); load_all()
}
```

### Photoresist-Coating Experiment

The objective of the photoresist-coating experiment is to determine the effect of the proportions of base resins in the formulation on a characteristic of interest of the photoresist material (Cornell and Ramsey 1998). The major component is defined as the base resin type, and the minor components are defined as the minor resins possessing different dissolution rates (slow and fast). There are two major components, $c_{1}$ and $c_{2}$, which are composed of two minor components each: $x_{11}$, $x_{12}$, and $x_{21}$, $x_{22}$, respectively. The range of values of both major and minor components is [0, 1]. In the experiment, the two major component proportions are ($c_{1}$, $c_{2}$) = (0.75, 0.25), (0.5, 0.5), and (0.25, 0.75). The two minor component proportions are ($x_{i1}$, $x_{i2}$) = (1, 0), (0.5, 0.5), and (0, 1), where $i = 1, 2$. There are in total 42 measured responses at 27 design points. Measurements were replicated twice at the design points where the products of the minor component proportions, $x_{11}x_{12}$ and $x_{21}x_{22}$, are both nonzero. The real data are included in the R package.

```{r, echo = TRUE, eval = T}
data("coating")
dat = coating
h_tmp = 1.1
x = dat[,c("c1","c2","x11","x12","x21","x22")]
y = dat[,ncol(dat)]
ptm <- proc.time()
out = ahm (y, x, num_major = 2, dist_minor = c(2,2),
           type = "weak", alpha=0, lambda_seq=seq(0,5,0.01), nfold = NULL,
           mapping_type = c("power"), powerh = h_tmp, rep_gcv=100)
proc.time() - ptm
summary(out)
```

Use the function cv.ahm to find the optimal value of the hyperparameter $h$.

```{r, echo = TRUE, eval = FALSE}
powerh_path = round(seq(0.001,2,length.out =15),3)

res = cv.ahm (y, x, powerh_path=powerh_path, metric = "mse", num_major=2, dist_minor=c(2,2),
              type = "weak", alpha=0, lambda_seq=seq(0,5,0.01), nfolds=NULL,
              mapping_type = c("power"), rep_gcv=100)

object = res$metric_mse
```

### Pringles Experiment

In this section, we analyze the Pringles experiment (Kang et al. 2011), whose goal is to develop a new kind of Pringles potato crisp such that the percentage of fat and the hardness of the potato crisps are optimized. There are three major components, \(c_{1}\), \(c_{2}\), and \(c_{3}\), among which \(c_{1}\) and \(c_{2}\) are composed of two minor components each: \(x_{11}\), \(x_{12}\), and \(x_{21}\), \(x_{22}\), respectively. The major component \(c_{3}\) is a pure material. The constraints on the components are given by
\[
\begin{aligned}
c_{1}+c_{2}+c_{3}=1, ~~& 0.601 \le c_{1} \le 0.643, \\
0.34 \le c_{2} \le 0.38, ~~& 0.017 \le c_{3} \le 0.019, \\
x_{11}+x_{12} = 1, ~~& x_{21} + x_{22} = 1, \\
0.835 \le x_{11} \le 0.905, ~~& 0.095 \le x_{12} \le 0.165, \\
0.9 \le x_{21} \le 0.98, ~~& 0.02 \le x_{22} \le 0.1.
\end{aligned}
\]
The design points are obtained from a major-minor crossed design. The responses are "Hardness" and "\%Fat". The real data are included in the R package.

- The response "%Fat"

```{r, echo = TRUE, eval = FALSE}
data("pringles_fat")
data_fat = pringles_fat
h_tmp = 1.3

x = data_fat[,c("c1","c2","c3","x11","x12","x21","x22")]
y = data_fat[,1]
ptm <- proc.time()
out = ahm (y, x, num_major = 3, dist_minor = c(2,2,1),
           type = "weak", alpha=0, lambda_seq=seq(0,5,0.01), nfold = NULL,
           mapping_type = c("power"), powerh = h_tmp,
           rep_gcv=100)
proc.time() - ptm
```

The common functions such as summary, coef, and predict are available for the fitted object.

```{r, echo = TRUE, eval = FALSE}
summary(out)

coefficients = coef(out)
fitted = predict(out, x)
```

- The response "Hardness"

```{r, echo = TRUE, eval = FALSE}
data("pringles_hardness")
dat = pringles_hardness
h_tmp = 1.3

x = dat[,c("c1","c2","c3","x11","x12","x21","x22")]
y = dat[,1]
ptm <- proc.time()
out = ahm (y, x, num_major = 3, dist_minor = c(2,2,1),
           type = "weak", alpha=0, lambda_seq=seq(0,5,0.01), nfold = NULL,
           mapping_type = c("power"), powerh = h_tmp,
           rep_gcv=100)
proc.time() - ptm
summary(out)
```

## References

- Cornell, J.A. and Ramsey, P.J. (1998). A generalized mixture model for categorized-components problems with an application to a photoresist-coating experiment. \emph{Technometrics}, 40(1), 48-61.
- Kang, L., Joseph, V.R. and Brenneman, W.A. (2011). Design and modeling strategies for mixture-of-mixtures experiments. \emph{Technometrics}, 53(2), 125-136.
- Lawson, J. and Willden, C. (2016). Mixture experiments in R using mixexp. \emph{Journal of Statistical Software}, 72(c02).
- Shen, S., Kang, L., and Deng, X. (2019). Additive heredity model for the analysis of mixture-of-mixtures experiments. \emph{Technometrics}, in press.
/scratch/gouwar.j/cran-all/cranData/AHM/vignettes/Introduction_to_the_AHM_package.Rmd
# Function to extract the biggest cross-correlations from an mcmc or mcmc.list object
bigCrossCorr <- function(x, big = 0.6, digits=3) {
  # Function to extract the biggest cross-correlations
  # from an mcmc or mcmc.list object.
  #
  # x : an mcmc or mcmc.list object as returned by rjags::jags or jagsUI::jags.samples
  # big : only values outside the range -big to +big will be returned
  # digits : number of digits to return.
  #
  # Returns a data frame with 3 columns, for the names of the two parameters and
  # the correlation coefficient.
  # See ?coda::crosscorr for details
  #
  # Mike Meredith, 1 Jan 2017

  if(!inherits(x, c("mcmc", "mcmc.list")))
    stop("'x' must be an 'mcmc' or 'mcmc.list' object.")
  xcor <- coda::crosscorr(x)
  xcor[lower.tri(xcor, diag=TRUE)] <- 0
  BIG <- which(abs(xcor) > big, arr.ind=TRUE)
  nms <- rownames(xcor)
  return(data.frame(par1=nms[BIG[, 1]], par2=nms[BIG[, 2]],
                    corr=round(diag(xcor[BIG[, 1], BIG[, 2]]), digits)))
}
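# Illustrative sketch (not package code): a toy mcmc object with correlated
# columns; bigCrossCorr() should flag the three strongly correlated pairs.
if (0) {
  set.seed(1)
  a <- rnorm(1000)
  toy <- coda::mcmc(cbind(alpha = a,
                          beta  =  a + rnorm(1000, 0, 0.1),
                          delta = -a + rnorm(1000, 0, 0.1),
                          gamma = rnorm(1000)))
  bigCrossCorr(toy, big = 0.8)  # alpha-beta, alpha-delta and beta-delta pairs
}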
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/bigCrossCorr.R
# AHM2 section 3.4.1

# Define a function to create an m-array based on capture-histories (CH)
# Modified from Kery & Schaub (2012), a couple of loops replaced
# with vector operations by Mike.
ch2marray <- function(CH){
  CH <- as.matrix(CH)  # might be a data frame
  nind <- nrow(CH)
  n.occasions <- ncol(CH)
  m.array <- matrix(data = 0, ncol = n.occasions+1, nrow = n.occasions)
  # First column and last row will be removed later
  # Last col is for number never-seen-again
  # Calculate the number of released individuals at each time period
  m.array[,1] <- colSums(CH)
  for (i in 1:nind){
    pos <- which(CH[i,]!=0)  # When was animal caught?
    for (z in seq_along(pos[-1])) { # Does nothing if length(pos) == 1
      m.array[pos[z], pos[z+1]] <- m.array[pos[z], pos[z+1]] + 1
    } #z
  } #i
  # Calculate the number of individuals that are never recaptured
  m.array[, n.occasions+1] <- m.array[, 1] - rowSums(m.array[, -1])
  # Remove last row (releases on last occasion will never be recaptured)
  # and 1st col (no REcaptures on 1st occasion).
  out <- m.array[-n.occasions, -1]
  return(out)
}
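# Illustrative sketch (not package code): m-array for a toy capture-history
# matrix with 3 individuals and 3 occasions (rows = individuals).
if (0) {
  CH <- matrix(c(1,1,0,
                 1,0,1,
                 0,1,1), nrow = 3, byrow = TRUE)
  ch2marray(CH)
  # Row i gives releases at occasion i; the first columns count first
  # recaptures at later occasions, the last column animals never seen again.
}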
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/ch2marray_AHM2_3-4-1.R
# Convenience wrappers for colorRampPalette which take care of specifying colors.
# Colors are from colorbrewer2.org and all are color-blind friendly.

fixRange <- function(rng, max) {
  rng <- unique(round(rng))
  rngmin <- max(1, min(rng, max))
  rngmax <- min(max, max(rng, 1))
  if(rngmin == rngmax)
    warning("You specified a single colour; this will not be pretty.", call.=FALSE)
  return(rngmin:rngmax)
}

rampYOR <- function(n=5, range=1:9, bias=1, ...) {
  n <- max(round(n))
  range <- fixRange(range, max=9)
  cols <- c('#ffffcc', '#ffeda0', '#fed976', '#feb24c', '#fd8d3c',
            '#fc4e2a', '#e31a1c', '#bd0026', '#800026')
  colorRampPalette(cols[range], bias=bias, ...)(n)
}

rampGreys <- function(n=5, range=1:9, bias=1, ...) {
  n <- max(round(n))
  range <- fixRange(range, max=9)
  cols <- c('#ffffff','#f0f0f0','#d9d9d9','#bdbdbd','#969696',
            '#737373','#525252','#252525','#000000')
  colorRampPalette(cols[range], bias=bias, ...)(n)
}

rampGreens <- function(n=5, range=1:9, bias=1, ...) {
  n <- max(round(n))
  range <- fixRange(range, max=9)
  cols <- c('#f7fcf5','#e5f5e0','#c7e9c0','#a1d99b','#74c476',
            '#41ab5d','#238b45','#006d2c','#00441b')
  colorRampPalette(cols[range], bias=bias, ...)(n)
}

rampBYR <- function(n=5, range=1:11, bias=1, ...) {
  n <- max(round(n))
  range <- fixRange(range, max=11)
  cols <- rev(c('#a50026','#d73027','#f46d43','#fdae61','#fee090',
                '#ffffbf','#e0f3f8','#abd9e9','#74add1','#4575b4','#313695'))
  colorRampPalette(cols[range], bias=bias, ...)(n)
}

rampGBr <- function(n=5, range=1:11, bias=1, ...) {
  n <- max(round(n))
  range <- fixRange(range, max=11)
  cols <- rev(c('#543005', '#8c510a', '#bf812d', '#dfc27d', '#f6e8c3',
                '#f5f5f5', '#c7eae5', '#80cdc1', '#35978f', '#01665e', '#003c30'))
  colorRampPalette(cols[range], bias=bias, ...)(n)
}
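# Illustrative sketch (not package code): colour an image plot with one of the
# ramps above; the 'range' argument restricts the palette to a sub-range of
# the colorbrewer colours before interpolation.
if (0) {
  z <- matrix(rnorm(100), 10, 10)
  image(z, col = rampBYR(100))               # full blue-yellow-red ramp
  image(z, col = rampGreys(20, range = 3:9)) # greys, skipping near-white tones
}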
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/color_ramps.R
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM)
# Marc Kery & Andy Royle, Academic Press, 2016.

# data.fn - AHM1 section 4.3 p135

# Function to simulate data for binomial mixture model
# (generally: spatially and temporally replicated counts)
# (introduced in AHM1 Section 4.3)
data.fn <- function(M = 267, J = 3, mean.lambda = 2, beta1 = -2, beta2 = 2, beta3 = 1,
                    mean.detection = 0.3, alpha1 = 1, alpha2 = -3, alpha3 = 0, show.plot = TRUE){
  #
  # Function to simulate point counts replicated at M sites during J occasions.
  # Population closure is assumed for each site.
  # Expected abundance may be affected by elevation (elev),
  # forest cover (forest) and their interaction.
  # Expected detection probability may be affected by elevation,
  # wind speed (wind) and their interaction.
  # Function arguments:
  #     M: Number of spatial replicates (sites)
  #     J: Number of temporal replicates (occasions)
  #     mean.lambda: Mean abundance at value 0 of abundance covariates
  #     beta1: Main effect of elevation on abundance
  #     beta2: Main effect of forest cover on abundance
  #     beta3: Interaction effect on abundance of elevation and forest cover
  #     mean.detection: Mean detection prob. at value 0 of detection covariates
  #     alpha1: Main effect of elevation on detection probability
  #     alpha2: Main effect of wind speed on detection probability
  #     alpha3: Interaction effect on detection of elevation and wind speed
  #     show.plot: if TRUE, plots of the data will be displayed;
  #        set to FALSE if you are running simulations.

  if(FALSE) x <- NULL # deals with R CMD check issues with 'curve'
  logit <- plogis     # so 'logit' is displayed on y axis

  # Create covariates
  elev <- runif(n = M, -1, 1)                          # Scaled elevation
  forest <- runif(n = M, -1, 1)                        # Scaled forest cover
  wind <- array(runif(n = M*J, -1, 1), dim = c(M, J))  # Scaled wind speed

  # Model for abundance
  beta0 <- log(mean.lambda)           # Mean abundance on link scale
  lambda <- exp(beta0 + beta1*elev + beta2*forest + beta3*elev*forest)
  N <- rpois(n = M, lambda = lambda)  # Realised abundance
  Ntotal <- sum(N)                    # Total abundance (all sites)
  psi.true <- mean(N>0)               # True occupancy in sample

  # Model for observations
  alpha0 <- qlogis(mean.detection)    # mean detection on link scale
  p <- plogis(alpha0 + alpha1*elev + alpha2*wind + alpha3*elev*wind)
  C <- matrix(NA, nrow = M, ncol = J) # Prepare matrix for counts
  for (i in 1:J){                     # Generate counts by survey
    C[,i] <- rbinom(n = M, size = N, prob = p[,i])
  }
  summaxC <- sum(apply(C,1,max))      # Sum of max counts (all sites)
  psi.obs <- mean(apply(C,1,max)>0)   # Observed occupancy in sample

  # All the plots
  if(show.plot){
    oldpar <- par(mfrow = c(2, 2), cex.main = 1)
    oldAsk <- devAskNewPage(ask = dev.interactive(orNone = TRUE))
    on.exit({par(oldpar) ; devAskNewPage(oldAsk)})
    tryPlot <- try( {
      # Page 1: lambda relationships
      curve(exp(beta0 + beta1*x), -1, 1, col = "red",
            main = "Relationship lambda-elevation \nat average forest cover",
            frame.plot = FALSE, xlab = "Scaled elevation")
      plot(elev, lambda, xlab = "Scaled elevation",
           main = "Relationship lambda-elevation \nat observed forest cover", frame.plot = FALSE)
      curve(exp(beta0 + beta2*x), -1, 1, col = "red",
            main = "Relationship lambda-forest \ncover at average elevation",
            xlab = "Scaled forest cover", frame.plot = FALSE)
      plot(forest, lambda, xlab = "Scaled forest cover",
           main = "Relationship lambda-forest cover \nat observed elevation", frame.plot = FALSE)

      # Page 2: p relationships
      par(mfrow = c(2, 2))
      curve(logit(alpha0 + alpha1*x), -1, 1, col = "red",
            main = "Relationship p-elevation \nat average wind speed",
            xlab = "Scaled elevation", frame.plot = FALSE)
      matplot(elev, p, xlab = "Scaled elevation",
              main = "Relationship p-elevation\n at observed wind speed",
              pch = "*", frame.plot = FALSE)
      curve(logit(alpha0 + alpha2*x), -1, 1, col = "red",
            main = "Relationship p-wind speed \n at average elevation",
            xlab = "Scaled wind speed", frame.plot = FALSE)
      matplot(wind, p, xlab = "Scaled wind speed",
              main = "Relationship p-wind speed \nat observed elevation",
              pch = "*", frame.plot = FALSE)

      # Page 3: counts
      matplot(elev, C, xlab = "Scaled elevation",
              main = "Relationship counts and elevation", pch = "*", frame.plot = FALSE)
      matplot(forest, C, xlab = "Scaled forest cover",
              main = "Relationship counts and forest cover", pch = "*", frame.plot = FALSE)
      matplot(wind, C, xlab = "Scaled wind speed",
              main = "Relationship counts and wind speed", pch = "*", frame.plot = FALSE)
      desc <- paste('Counts at', M, 'sites during', J, 'surveys')
      # hist(C, main = desc, breaks = 50, col = "grey")
      histCount(C, NULL, color = "grey", main = desc)
    }, silent = TRUE )
    if(inherits(tryPlot, "try-error"))
      tryPlotError(tryPlot)
  }

  # Output
  return(list(M = M, J = J, mean.lambda = mean.lambda, beta0 = beta0, beta1 = beta1,
              beta2 = beta2, beta3 = beta3, mean.detection = mean.detection,
              alpha0 = alpha0, alpha1 = alpha1, alpha2 = alpha2, alpha3 = alpha3,
              elev = elev, forest = forest, wind = wind, lambda = lambda, N = N,
              p = p, C = C, Ntotal = Ntotal, psi.true = psi.true,
              summaxC = summaxC, psi.obs = psi.obs))
}
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/data-fn_AHM1_4.3_Simulate_binomial_mixture_model.R
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM)
# Marc Kery & Andy Royle, Academic Press, 2016.

# HELPER FUNCTIONS

# e2dist
# Function to compute Euclidean distances
# (from the R package that goes along with the
# Spatial Capture-Recapture book by Royle et al. (2014))
e2dist <- function(x, y=NULL){
  if(is.null(dim(x)) && length(x) == 2) # length 2 vector
    x <- matrix(x, nrow=1)
  if(ncol(x) != 2)
    stop("Argument 'x' must be a 2-column matrix or data frame, or a length 2 vector.", call.=FALSE)
  if(is.null(y)) {
    y <- x
  } else {
    if(is.null(dim(y)) && length(y) == 2)
      y <- matrix(y, nrow=1)
    if(ncol(y) != 2)
      stop("Argument 'y' must be a 2-column matrix or data frame, or a length 2 vector.", call.=FALSE)
  }
  i <- sort(rep(1:nrow(y), nrow(x)))
  dvec <- sqrt((x[, 1] - y[i, 1])^2 + (x[, 2] - y[i, 2])^2)
  matrix(dvec, nrow = nrow(x), ncol = nrow(y), byrow = FALSE)
}
# ....................................................................
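# Illustrative sketch (not package code): distances between two point sets,
# e.g. activity centres and trap locations in an SCR setting.
if (0) {
  pts   <- cbind(x = c(0, 1),      y = c(0, 0))
  traps <- cbind(x = c(0, 0.5, 1), y = c(1, 1, 1))
  e2dist(pts, traps)  # 2 x 3 matrix of Euclidean distances
}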
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/e2dist.R
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM)
# Marc Kery & Andy Royle, Academic Press, 2016.

# Function returning three fit-statistics (used in parboot GOF tests throughout book)
# (used, among others, in AHM1 Chapter 7, e.g., AHM1 Section 7.5.4)
# Updated 2019-01-14 to cope with NAs in the data, see AHM2 2.3.3
fitstats <- function(fm) {
  observed <- unmarked::getY(fm@data)
  notna <- !is.na(observed) # to accommodate missing values
  expected <- fitted(fm)
  resids <- residuals(fm)
  sse <- sum(resids[notna]^2)
  chisq <- sum((observed[notna] - expected[notna])^2 / expected[notna])
  freeTuke <- sum((sqrt(observed) - sqrt(expected))[notna]^2)
  out <- c(SSE=sse, Chisq=chisq, freemanTukey=freeTuke)
  return(out)
}

# Define new fitstats function
# (introduced in AHM1 Section 7.9.3)
fitstats2 <- function(fm) {
  observed <- unmarked::getY(fm@data)
  expected <- fitted(fm)
  resids <- residuals(fm)
  n.obs <- apply(observed, 1, sum, na.rm=TRUE)
  n.pred <- apply(expected, 1, sum, na.rm=TRUE)
  sse <- sum(resids^2, na.rm=TRUE)
  chisq <- sum((observed - expected)^2 / expected, na.rm=TRUE)
  freeTuke <- sum((sqrt(observed) - sqrt(expected))^2, na.rm=TRUE)
  freeTuke.n <- sum((sqrt(n.obs) - sqrt(n.pred))^2, na.rm=TRUE)
  sse.n <- sum((n.obs - n.pred)^2, na.rm=TRUE)
  chisq.n <- sum((n.obs - n.pred)^2 / expected, na.rm=TRUE)
  out <- c(SSE=sse, Chisq=chisq, freemanTukey=freeTuke,
           SSE.n = sse.n, Chisq.n = chisq.n, freemanTukey.n=freeTuke.n)
  return(out)
}
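# Illustrative sketch (not package code): the intended use is as the statistic
# in unmarked::parboot(); here on a quickly fitted toy occupancy model.
if (0) {
  library(unmarked)
  set.seed(1)
  y <- matrix(rbinom(100 * 3, 1, 0.4), 100, 3)
  umf <- unmarkedFrameOccu(y = y)
  fm <- occu(~1 ~1, data = umf)
  (pb <- parboot(fm, fitstats, nsim = 25))  # bootstrap GOF based on SSE,
                                            # Chi-square and Freeman-Tukey
}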
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/fitstats_assessment_of_fit.R
# Function by Mathias Tobler which computes the residual correlation matrix in
# occurrence from a latent-variable multi-species occupancy or N-mixture model,
# as showcased in Chapter 8 of AHM2. Input is the sims.list of the
# latent-variable coefficients (lv.coef), with dimensions
# iterations x species x latent variables. The function returns the residual
# correlation matrix, as described in Tobler et al. (Ecology, 2019) and other
# recent JSDM papers.
getLVcorrMat <- function(lv.coef, type=c("occupancy", "Nmix"), stat=mean){
  type <- match.arg(type)
  niter <- dim(lv.coef)[1]
  nspec <- dim(lv.coef)[2]
  cm.all <- array(NA, dim = c(niter, nspec, nspec))
  if(type == "occupancy") {
    eps.res <- apply(lv.coef, c(1,2), function(x) 1 - sum(x^2))
    fix <- diag(apply(eps.res, 2, mean))
  } else {
    fix <- 0
  }
  for(i in 1:niter){ # for each MCMC sample
    cm.all[i,,] <- cov2cor(tcrossprod(lv.coef[i,,]) + fix)
  }
  if(is.null(stat)) {
    return(cm.all)
  } else {
    return(apply(cm.all, c(2, 3), stat))
  }
}

# Function by Marc, email 2019-05-31
getEcorrMat <- function(beta, stat=mean){
  nspec <- dim(beta)[2]
  ecorraw <- apply(beta, 1, function(x) cor(t(x))) # matrix, nspec^2 x niter
  ecorarr <- array(t(ecorraw), dim=c(dim(beta)[1], nspec, nspec))
  if(is.null(stat)) {
    return(ecorarr)
  } else {
    return(apply(ecorarr, c(2, 3), stat))
  }
}
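# Illustrative sketch (not package code): residual correlation matrix from fake
# posterior draws of latent-variable loadings (100 draws, 4 species, 2 latent
# variables). Loadings are kept small so that 1 - sum(lambda^2) stays positive,
# as the occupancy variant requires.
if (0) {
  set.seed(1)
  lv.coef <- array(rnorm(100 * 4 * 2, 0, 0.3), dim = c(100, 4, 2))
  lv.coef <- pmin(pmax(lv.coef, -0.7), 0.7)
  round(getLVcorrMat(lv.coef, type = "occupancy"), 2)  # posterior mean, 4 x 4
}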
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/getLVcorrMat_AHM2_8--.R
# AHM2 section 1.6, old name graph.ssm2

# To plot the observed time series and the estimated population trajectories,
# we have adapted the SSM graphing function graph.ssm from chapter 5 in
# Kery & Schaub (2012) to multivariate SSMs and call this new function graphSSM.
# When browsing through the graphs for each site, we see that the estimated
# states (i.e., the latent relative abundance) represent a greatly smoothed
# picture compared with the more jagged observed data (Fig. 1-11).

# Define function to draw a graph to summarize results
# for multivariate time series of counts
graphSSM <- function(ssm, C){
  fitted <- lower <- upper <- numeric()
  nsites <- nrow(ssm$mean$n)
  T <- ncol(ssm$mean$n)
  for(j in 1:nsites){
    for (i in 1:T){
      fitted[i] <- mean(ssm$sims.list$n[,j,i])
      lower[i] <- quantile(ssm$sims.list$n[,j,i], 0.025)
      upper[i] <- quantile(ssm$sims.list$n[,j,i], 0.975)
    }
    m1 <- min(c(C[j,], fitted, lower), na.rm = TRUE)
    m2 <- 1.2*max(c(C[j,], fitted, upper), na.rm = TRUE)
    oldpar <- par(mar = c(4.5, 4, 1, 1), cex = 1.2, cex.main = 0.8)
    on.exit(par(oldpar))
    oldAsk <- devAskNewPage(ask = dev.interactive(orNone=TRUE))
    on.exit(devAskNewPage(oldAsk), add=TRUE) # Restore previous setting
    plot(0, 0, ylim = c(m1, m2), xlim = c(0.5, T), main = paste('Site', j),
         ylab = "Population size", xlab = "Year", las = 1, col = "black",
         type = "l", lwd = 2, frame = FALSE, axes = FALSE)
    axis(2, las = 1)
    axis(1, at = seq(0, T, 1), labels = seq(0, T, 1))
    axis(1, at = 0:T, labels = rep("", T + 1), tcl = -0.25)
    polygon(x = c(1:T, T:1), y = c(lower, upper[T:1]), col = "gray90", border = "gray90")
    points(C[j,], type = "l", col = "black", lwd = 2)
    points(fitted, type = "l", col = "blue", lwd = 2)
    legend(x = 1, y = m2,
           legend = c("Observed data (C)", "Estimated latent states (n) with 95% CRI"),
           lty = c(1, 1), lwd = c(2, 2), col = c("black", "blue"), bty = "n", cex = 0.6)
  }
}
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/graphSSM_AHM2_1-6.R
# A helper function to plot a 'histogram' of counts (non-negative integers).
# Used in simNmix (2 places), data.fn, sim.fn, simComm.
# NOT EXPORTED
# Mike Meredith, 2016-12-25, updated 2020-04-03
histCount <- function(C1, C2 = NULL, x0 = FALSE,
                      nbmax = 30, # max number of bins
                      color = c('skyblue', adjustcolor('red', 0.5)), border='white',
                      bty='n', ylab="Frequency", xlab="Count", main="", ...) {
  # Plots a 'histogram' of 1 or 2 sets of counts (non-negative integers).
  #
  # C1 : a vector of non-negative integers; negative values silently ignored.
  # C2 : an optional second vector of non-negative integers, or NULL.
  # x0 : if TRUE, the x axis begins at zero, otherwise the lowest frequency.
  # color : a length 2 vector of colours for the two histograms; the second colour
  #   should be semi-transparent; can be scalar if C2 = NULL.
  # border : colour of the border around bars, or NA for no border.
  # other arguments as usual for plotting functions.

  rng <- range(C1, C2)
  if(x0)
    rng[1] <- 0
  range <- diff(rng) + 1
  if(range <= nbmax) {
    bwidth <- 1
  } else {
    bwidth <- range %/% nbmax + 1
  }
  nb <- ceiling(range/bwidth)
  br <- seq(min(C1,C2), max(C1,C2)+bwidth, by=bwidth) - 0.5
  H1 <- hist(C1, breaks = br, plot=FALSE)
  ymax <- max(H1$counts)
  if(!is.null(C2)) {
    H2 <- hist(C2, breaks = br, plot=FALSE)
    ymax <- max(ymax, H2$counts)
  }
  plot(H1, col=color[1], border=border, yaxs='i', ylim=c(0, ymax), xlim=rng,
       bty=bty, ylab=ylab, xlab=xlab, main=main)
  if(!is.null(C2)) {
    plot(H2, col=color[2], border=border, add=TRUE)
    plot(H1, breaks = br, col=NA, border=border, add=TRUE) # replot borders of first histogram
  }
  axis(2)                        # replot axis
  segments(rng[1], 0, rng[2], 0) # do line at foot of bars
}
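# Illustrative sketch (not package code): overlay 'histograms' of two sets of
# simulated counts; the second colour is semi-transparent by default.
if (0) {
  C1 <- rpois(100, 3)
  C2 <- rpois(100, 6)
  histCount(C1, C2, xlab = "Count", main = "Poisson(3) vs Poisson(6)")
}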
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/helper_histCount.R
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM)
# Marc Kery & Andy Royle, Academic Press, 2016.

# Helper function to draw scale for image (from SCR book)
# (introduced somewhere in AHM1 Chapter 9)
# cex.legend added 12 July 2019, v.0.1.4.9083
image_scale <- function (z, col, x, y = NULL, size = NULL, digits = 2,
                         labels = c("breaks", "ranges"), cex.legend = 1) {
  n <- length(col)
  usr <- par("usr")
  mx <- mean(usr[1:2])
  my <- mean(usr[3:4])
  dx <- diff(usr[1:2])
  dy <- diff(usr[3:4])
  if (missing(x))
    x <- mx + 1.05 * dx/2
  else if (is.list(x)) {
    if (length(x$x) == 2)
      size <- c(diff(x$x), -diff(x$y)/n)
    y <- x$y[1]
    x <- x$x[1]
  }
  else x <- x[1]
  if (is.null(size))
    if (is.null(y)) {
      size <- 0.618 * dy/n
      y <- my + 0.618 * dy/2
    }
    else size <- (y - my) * 2/n
  if (length(size) == 1)
    size <- rep(size, 2)
  if (is.null(y))
    y <- my + n * size[2]/2
  i <- seq(along = col)
  rect(x, y - i * size[2], x + size[1], y - (i - 1) * size[2],
       col = rev(col), xpd = TRUE)
  rng <- range(z, na.rm = TRUE)
  bks <- seq(from = rng[2], to = rng[1], length = n + 1)
  bks <- formatC(bks, format = "f", digits = digits)
  labels <- match.arg(labels)
  if (labels == "breaks")
    ypts <- y - c(0, i) * size[2]
  else {
    bks <- paste(bks[-1], bks[-(n + 1)], sep = " - ")
    ypts <- y - (i - 0.5) * size[2]
  }
  text(x = x + 1.2 * size[1], y = ypts, labels = bks,
       adj = ifelse(size[1] > 0, 0, 1), xpd = TRUE, cex = cex.legend)
}
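# Illustrative sketch (not package code): draw an image and add the colour
# scale in the right margin; rampBYR() is the ramp helper defined in
# color_ramps.R of this package.
if (0) {
  z <- matrix(rnorm(400), 20, 20)
  par(mar = c(3, 3, 1, 6))  # leave room on the right for the scale
  image(z, col = rampBYR(10))
  image_scale(z, col = rampBYR(10), cex.legend = 0.8)
}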
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/image_scale.R
# AHM2 Section 7.2.2
# Named fp.modSel in earlier drafts.

# modSelFP <- function(mod.list){  # Thanks to Chris Sutherland!
#                                  # (the same of the 'one-s**t hypothesis')
#   Model <- names(mod.list)
#   nPars <- sapply(mod.list, function(x)length(x@opt$par))
#   nll <- sapply(mod.list, function(x)x@opt$value)
#   AIC <- round(sapply(mod.list, function(x)x@AIC),2)
#   dAIC <- round(AIC - min(AIC), 2)
#   AICwt <- round(exp(-0.5 * dAIC)/sum(exp(-0.5 * dAIC)), 2)
#   modTab <- data.frame(nPars, AIC, dAIC, AICwt, row.names = Model)[order(AIC),]
#   modTab$cuWt <- cumsum(modTab$AICwt)  # Do this after sorting.
#   return(modTab)
# }

modSelFP <- function(mod.list){
  message("Please use the functions in 'unmarked' for this:\n  modSel(fitList(fits=mod.list))")
  unmarked::modSel(unmarked::fitList(fits=mod.list))
}
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/modSelFP.R
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM)
# Marc Kery & Andy Royle, Academic Press, 2016.

# instRemPiFun, crPiFun, crPiFun.Mb, MhPiFun - AHM1 section 7.7 p346
# Customised 'piFun's for unmarked::multinomPois

# pi function for removal design with 3 intervals of unequal length (2, 3, 5 minutes)
# (introduced in AHM1 Section 7.7)
instRemPiFun <- function(p){
  M <- nrow(p)
  J <- ncol(p)
  pi <- matrix(NA, M, J)
  p[,1] <- pi[,1] <- 1 - (1 - p[,1])^2
  p[,2] <- 1 - (1 - p[,2])^3
  p[,3] <- 1 - (1 - p[,3])^5
  for(i in 2:J) {
    pi[,i] <- pi[, i - 1]/p[, i - 1] * (1 - p[, i - 1]) * p[, i]
  }
  return(pi)
}
# .............................................................................

# pi function for capture-recapture design with 3 surveys
# (introduced in AHM1 Section 7.8)
crPiFun <- function(p) {
  p1 <- p[,1]
  p2 <- p[,2]
  p3 <- p[,3]
  cbind("001" = (1 - p1) * (1 - p2) * p3,
        "010" = (1 - p1) * p2 * (1 - p3),
        "011" = (1 - p1) * p2 * p3,
        "100" = p1 * (1 - p2) * (1 - p3),
        "101" = p1 * (1 - p2) * p3,
        "110" = p1 * p2 * (1 - p3),
        "111" = p1 * p2 * p3)
}
# .............................................................................

# pi function for capture-recapture design with 3 surveys and behavioural response
# (introduced in AHM1 Section 7.8.2)
crPiFun.Mb <- function(p) {
  pNaive <- p[,1]
  pWise <- p[,3]
  cbind("001" = (1 - pNaive) * (1 - pNaive) * pNaive,
        "010" = (1 - pNaive) * pNaive * (1 - pWise),
        "011" = (1 - pNaive) * pNaive * pWise,
        "100" = pNaive * (1 - pWise) * (1 - pWise),
        "101" = pNaive * (1 - pWise) * pWise,
        "110" = pNaive * pWise * (1 - pWise),
        "111" = pNaive * pWise * pWise)
}
# ..................................................................................

# Pi function for model with individual detection heterogeneity
# (introduced in AHM1 Section 7.8.3)
MhPiFun <- function(p) {
  mu <- qlogis(p[,1]) # logit(p)
  sig <- exp(qlogis(p[1,2]))
  J <- ncol(p)
  M <- nrow(p)
  il <- matrix(NA, nrow=M, ncol=7)
  dimnames(il) <- list(NULL, c("001","010","011","100","101","110","111"))
  for(i in 1:M) {
    il[i,1] <- integrate( function(x) {
      (1-plogis(mu[i]+x))*(1-plogis(mu[i]+x))*plogis(mu[i]+x)*dnorm(x,0,sig) },
      lower=-Inf, upper=Inf, stop.on.error=FALSE)$value
    il[i,2] <- integrate( function(x) {
      (1-plogis(mu[i]+x))*plogis(mu[i]+x)*(1-plogis(mu[i]+x))*dnorm(x,0,sig) },
      lower=-Inf, upper=Inf, stop.on.error=FALSE)$value
    il[i,3] <- integrate( function(x) {
      (1-plogis(mu[i]+x))*plogis(mu[i]+x)*plogis(mu[i]+x)*dnorm(x,0,sig) },
      lower=-Inf, upper=Inf, stop.on.error=FALSE)$value
    il[i,4] <- integrate( function(x) {
      plogis(mu[i]+x)*(1-plogis(mu[i]+x))*(1-plogis(mu[i]+x))*dnorm(x,0,sig) },
      lower=-Inf, upper=Inf, stop.on.error=FALSE)$value
    il[i,5] <- integrate( function(x) {
      plogis(mu[i]+x)*(1-plogis(mu[i]+x))*plogis(mu[i]+x)*dnorm(x,0,sig) },
      lower=-Inf, upper=Inf, stop.on.error=FALSE)$value
    il[i,6] <- integrate( function(x) {
      plogis(mu[i]+x)*plogis(mu[i]+x)*(1-plogis(mu[i]+x))*dnorm(x,0,sig) },
      lower=-Inf, upper=Inf, stop.on.error=FALSE)$value
    il[i,7] <- integrate( function(x) {
      plogis(mu[i]+x)*plogis(mu[i]+x)*plogis(mu[i]+x)*dnorm(x,0,sig) },
      lower=-Inf, upper=Inf, stop.on.error=FALSE)$value
  }
  return(il)
}
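# Illustrative sketch (not package code): cell probabilities for three
# capture-recapture surveys with constant p = 0.4 at two sites.
if (0) {
  p <- matrix(0.4, nrow = 2, ncol = 3)
  crPiFun(p)           # 7 capture-history probabilities per site
  rowSums(crPiFun(p))  # 1 - (1 - 0.4)^3 = 0.784, Pr(detected at least once)
}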
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/piFun_customised_AHM1_7-7.R
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM)
# Marc Kery & Andy Royle, Academic Press, 2016.

# playRN - AHM1 section 6.13.1 p301

# Function to play Royle-Nichols model
# (introduced in AHM1 Section 6.13.1)
playRN <- function(M = 267, J = 3, mean.abundance = 1, mean.detection = 0.3,
                   show.plots = TRUE, verbose = TRUE){
  # Function generates replicated count data under the Nmix model of Royle (2004),
  # then 'degrades' the data to detection/nondetection and fits the RN model
  # (Royle & Nichols 2003) using unmarked and estimates site-specific abundance.
  # Requires function simNmix and package unmarked.

  # devAskNewPage(ask = FALSE) ## leave it as it is!

  # Simulate Nmix data under a range of abundance levels
  data <- simNmix(nsites = M, nvisits = J, mean.lam = mean.abundance,
                  mean.p = mean.detection, beta2.lam = 1, beta3.p = -1,
                  beta.p.survey = -1, show.plots = FALSE, verbose = verbose)
  # Turn counts into detection/nondetection data
  y <- data$C   # Copy counts C into y
  y[y>0] <- 1   # Turn counts >0 into 1
  # Load unmarked, format data and summarize
  umf <- unmarkedFrameOccu(y=y,
                           siteCovs = data.frame(cov2 = data$site.cov[,2],
                                                 cov3 = data$site.cov[,3]),
                           obsCovs = list(obscov = data$survey.cov))
  # Fit data-generating model
  fm <- occuRN(~cov3+obscov ~cov2, data=umf)
  # Estimate local abundance N and plot against true N (known in simulation)
  Nest <- bup(ranef(fm, K = 50), "mean") # K: upper summation bound; the value
                                         # was missing in the source, 50 is an
                                         # assumed choice
  if(show.plots) {
    # par(mfrow = c(1,1)) ## leave it alone
    tryPlot <- try( {
      plot(data$N, Nest, xlab = "True local abundance",
           ylab = "Estimated local abundance", frame = FALSE)
      abline(0,1, lwd = 3)                           # 1:1 line
      abline(lm(Nest ~ data$N), col = "blue", lwd = 3) # Regression
    }, silent = TRUE)
    if(inherits(tryPlot, "try-error"))
      tryPlotError(tryPlot)
  }
  slope <- coef(lm(Nest ~ data$N))[2] # Is 1 if model perfect
  return(list(nsites = M, nvisits = J, coef = coef(fm), slope = slope))
}
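# Illustrative sketch (not package code): one run of the Royle-Nichols recovery
# experiment; assumes simNmix() (elsewhere in this package) and unmarked are
# available.
if (0) {
  library(unmarked)
  set.seed(1)
  out <- playRN(M = 100, J = 3, show.plots = FALSE, verbose = FALSE)
  out$slope  # close to 1 when estimated local N tracks the true N well
}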
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/playRN_AHM1_6-13-1_Simulate_Royle-Nichols_model.R
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM)
# Marc Kery & Andy Royle, Academic Press, 2016.

# plot.Nmix.resi and map.Nmix.resi - AHM1 section 6.9.3 p261

# Function to produce some residual plots in AHM1 Section 6.9.3
plot_Nmix_resi <- function(fmP, fmNB, fmZIP){
  # Function does diagnostic plots for one Nmix model fitted with all three
  # mixture distributions currently available in unmarked:
  # Poisson, negative binomial and zero-inflated Poisson.
  # For each, fitted values vs. observed data and
  # residuals vs. fitted values are plotted.

  # Plot fitted vs. observed data
  op <- par(mfrow = c(2,3), mar = c(4,4,2,2), cex = 1.2) ; on.exit(par(op))
  tmp1 <- range(c(fitted(fmP), fitted(fmNB), fitted(fmZIP)), na.rm = TRUE)
  limits1 = round(c(tmp1[1], tmp1[2]))
  tmp2 <- range(c(residuals(fmP), residuals(fmNB), residuals(fmZIP)), na.rm = TRUE)
  limits2 = round(c(tmp2[1], tmp2[2]))
  plot(fitted(fmP) ~ fmP@data@y, xlab = "Observed data", ylab = "Fitted values (P)",
       frame = FALSE, ylim = limits1)
  abline(0,1, lwd = 3)
  abline(lm(c(fitted(fmP)) ~ c(fmP@data@y)), col = "blue", lwd = 3)
  plot(fitted(fmNB) ~ fmP@data@y, xlab = "Observed data", ylab = "Fitted values (NB)",
       frame = FALSE, ylim = limits1)
  abline(0,1, lwd = 3)
  abline(lm(c(fitted(fmNB)) ~ c(fmP@data@y)), col = "blue", lwd = 3)
  plot(fitted(fmZIP) ~ fmP@data@y, xlab = "Observed data", ylab = "Fitted values (ZIP)",
       frame = FALSE, ylim = limits1)
  abline(0,1, lwd = 3)
  abline(lm(c(fitted(fmZIP)) ~ c(fmP@data@y)), col = "blue", lwd = 3)

  # Plot residuals vs. fitted values
  plot(residuals(fmP) ~ fitted(fmP), xlab = "Fitted values (P)", ylab = "Residuals",
       frame = FALSE, xlim = limits1, ylim = limits2)
  abline(h = 0, lwd = 2)
  abline(lm(c(residuals(fmP)) ~ c(fitted(fmP))), col = "blue", lwd = 3)
  plot(residuals(fmNB) ~ fitted(fmNB), xlab = "Fitted values (NB)", ylab = "Residuals",
       frame = FALSE, xlim = limits1, ylim = limits2)
  abline(h = 0, lwd = 2)
  abline(lm(c(residuals(fmNB)) ~ c(fitted(fmNB))), col = "blue", lwd = 3)
  plot(residuals(fmZIP) ~ fitted(fmZIP), xlab = "Fitted values (ZIP)", ylab = "Residuals",
       frame = FALSE, xlim = limits1, ylim = limits2)
  abline(h = 0, lwd = 2)
  abline(lm(c(residuals(fmZIP)) ~ c(fitted(fmZIP))), col = "blue", lwd = 3)
}
# ..................................................................................................

# Function to produce a map of the residuals in AHM1 Section 6.9.3
map.Nmix.resi <- function(fm, x, y){
  # Function produces a map of the mean residuals from an N-mixture model
  # object named fm, which was fit by function pcount in unmarked.
  # Function arguments are the fitted model object and the x and y coordinates
  # of every site.
  mean.resi <- apply(residuals(fm), 1, mean, na.rm = TRUE)
  mean.resi[is.nan(mean.resi)] <- mean(mean.resi, na.rm = TRUE)
  spdata <- data.frame(residuals = mean.resi, x = x, y = y)
  sp::coordinates(spdata) <- c("x", "y")
  plot(sp::bubble(spdata, "residuals", col = c("blue", "red"),
                  main = paste("Average residuals of fitted N-mixture model")))
}
# ---- File: AHMbook/R/plot-Nmix-resi&map-Nmix-resi_AHM1_6-9-3_plotting.R ----
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM)
# Marc Kery & Andy Royle, Academic Press, 2016.

# ppc.plot - AHM1 section 6.8 p253

# Function to plot results from posterior predictive check in AHM1 section 6.8,
# for a fitted model object with JAGS, as in that section

ppc.plot <- function(fm){
  op <- par(mfrow = c(2,2), mar = c(5,5,3,2), cex.lab = 1.3, cex.axis = 1.3)
  on.exit(par(op))
  # Function plots results from posterior predictive check
  # in AHM1 section 6.8 for a fitted model object with JAGS
  fit.a <- fm$sims.list$fit.actual   # Extract posterior samples
  fit.s <- fm$sims.list$fit.sim
  ch <- fm$sims.list$c.hat
  lims <- c(min(c(fit.a, fit.s)), max(c(fit.a, fit.s)))
  hist(fit.a, breaks = 100, col = "grey", main = "",
      xlab = "Fit statistic actual data")
  hist(fit.s, breaks = 100, col = "grey", main = "",
      xlab = "Fit statistic simulated data")
  hist(ch, breaks = 100, col = "grey", main = "",
      xlab = "Lack of fit ratio (c-hat)")
  title(paste("c-hat =", round(mean(ch), 2)))
  plot(fit.a[fit.a >= fit.s], fit.s[fit.a >= fit.s],
      xlab = "Fit statistic actual data", ylab = "Fit statistic simulated data",
      col = "blue", xlim = lims, ylim = lims, frame = FALSE)
  title(paste("bpv (proportion red) =", round(mean(fit.s > fit.a), 2)))
  points(fit.a[fit.a < fit.s], fit.s[fit.a < fit.s], col = "red")
  abline(0, 1)
}
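# Example use (illustrative sketch; fm is assumed to be a jagsUI fit whose
# sims.list contains fit.actual, fit.sim and c.hat, as in AHM1 section 6.8).
if(FALSE) {
  ppc.plot(fm)
}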
# ---- File: AHMbook/R/ppc-plot_AHM1_6-8_plot_posterior_predictive_check.R ----
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM)
# Marc Kery & Andy Royle, Academic Press, 2016.

# sim3Occ - AHM1 section 10.10 p604

# Function to simulate data for static 3-level occupancy models
# (introduced in AHM1 Section 10.10)

sim3Occ <- function(nunits = 100, nsubunits = 5, nreps = 3,
    mean.psi = 0.8, beta.Xpsi = 1, sd.logit.psi = 0,
    mean.theta = 0.6, theta.time.range = c(-1, 1), beta.Xtheta = 1, sd.logit.theta = 0,
    mean.p = 0.4, p.time.range = c(-2, 2), beta.Xp = -1, sd.logit.p = 0,
    show.plot = TRUE, verbose = TRUE) {
  #
  # Function generates 3-level occupancy data
  # with possibility of site-specific random variation at every level,
  # "time effects" at the middle and the lower levels and
  # effects of one distinct covariate at each level.
  #
  # Written by Marc Kery, 2014
  #
  # Function arguments:
  # nunits: Number of main units (large quadrats)
  # nsubunits: Number of subunits (nested subsamples within each main unit)
  # nreps: Number of rep surveys in every subunit
  # mean.psi: Mean large-scale, unit-level occupancy probability (psi)
  # beta.Xpsi: effect on psi of covariate A (at main unit level)
  # sd.logit.psi: SD of logit(psi), unstructured site variation in psi
  # mean.theta: Mean small-scale (subunit) occupancy probability (theta)
  # theta.time.range: range of theta 'intercepts' for subunits
  # beta.Xtheta: effect on theta of covariate B (at subunit level)
  # sd.logit.theta: SD of logit(theta), unstructured site variation in theta
  # mean.p: Mean per-survey detection probability
  # p.time.range: range of p 'intercepts' for replicates
  # beta.Xp: effect on p of covariate C (unit by subunit by replicate)
  # sd.logit.p: SD of logit(p)

  if(FALSE) x <- NULL  # Fudge to stop R CMD check complaining.

  # Checks and fixes for input data -----------------------------
  nunits <- round(nunits[1])
  nsubunits <- round(nsubunits[1])
  nreps <- round(nreps[1])
  stopifnotProbability(mean.psi)
  stopifNegative(sd.logit.psi)
  stopifnotProbability(mean.theta)
  stopifNegative(sd.logit.theta)
  stopifnotProbability(mean.p)
  stopifNegative(sd.logit.p)
  # ----------------------------------------------------------------

  # Create data structures
  z <- psi <- array(NA, dim = nunits)                     # Unit occurrence
  a <- theta <- array(NA, dim = c(nunits, nsubunits))     # Subunit
  y <- p <- array(NA, dim = c(nunits, nsubunits, nreps))  # Rep

  # Create standardised covariate values
  covA <- as.numeric(array(runif(nunits, -2, 2), dim = nunits))
  covB <- array(runif(nunits*nsubunits, -2, 2), dim = c(nunits, nsubunits))
  covC <- array(runif(nunits*nsubunits*nreps, -2, 2),
      dim = c(nunits, nsubunits, nreps))

  # Simulate psi, theta and p and plot all
  psi <- plogis(qlogis(mean.psi) + beta.Xpsi * covA + rnorm(nunits, 0, sd.logit.psi))
  theta.time.effect <- runif(nsubunits, theta.time.range[1], theta.time.range[2])
  p.time.effect <- runif(nreps, p.time.range[1], p.time.range[2])
  for(j in 1:nsubunits){
    theta[,j] <- plogis(qlogis(mean.theta) + theta.time.effect[j] +
        (beta.Xtheta*covB)[,j] +
        array(rnorm(nunits*nsubunits, 0, sd.logit.theta),
            dim = c(nunits, nsubunits))[,j])
    for(k in 1:nreps){
      p[,j,k] <- plogis(qlogis(mean.p) + p.time.effect[k] + (beta.Xp*covC)[,j,k] +
          array(rnorm(nunits*nsubunits*nreps, 0, sd.logit.p),
              dim = c(nunits, nsubunits, nreps))[,j,k])
    }
  }

  # Visualisation of covariate relationships of psi, theta and p
  if(show.plot) {
    op <- par(mfrow = c(1,3), mar = c(5,5,5,2), cex.lab = 1.5, cex.axis = 1.5)
    on.exit(par(op))
    tryPlot <- try( {
      plot(covA, psi, xlab = "Unit covariate A", ylab = "psi", ylim = c(0,1),
          main = "Large-scale occupancy probability\n(psi)", frame = FALSE)
      curve(plogis(qlogis(mean.psi) + beta.Xpsi * x), -2, 2,
          col = "red", lwd = 3, add = TRUE)
      plot(covB, theta, xlab = "Unit-subunit covariate B", ylab = "theta", ylim = c(0,1),
          main = "Small-scale occupancy probability/availability\n(theta) (red - time variation)",
          frame = FALSE)
      for(j in 1:nsubunits){
        curve(plogis(qlogis(mean.theta) + theta.time.effect[j] + beta.Xtheta * x),
            -2, 2, lwd = 2, col = "red", add = TRUE)
      }
      plot(covC, p, xlab = "Unit-subunit-rep covariate C", ylab = "p", ylim = c(0,1),
          main = "Detection probability (p)\n(red - replicate variation)", frame = FALSE)
      for(k in 1:nreps){
        curve(plogis(qlogis(mean.p) + p.time.effect[k] + beta.Xp * x),
            -2, 2, lwd = 2, col = "red", add = TRUE)
      }
    }, silent = TRUE)
    if(inherits(tryPlot, "try-error"))
      tryPlotError(tryPlot)
  }

  # Sample three nested Bernoulli distributions
  # with probabilities psi, z*theta and a*p
  for (i in 1:nunits) {
    z[i] <- rbinom(n = 1, size = 1, prob = psi[i])
    for (j in 1:nsubunits) {
      a[i, j] <- rbinom(n = 1, size = 1, prob = z[i] * theta[i,j])
      for (k in 1:nreps) {
        y[i,j,k] <- rbinom(n = 1, size = 1, prob = a[i,j] * p[i,j,k])
      } # survey
    } # subunit
  } # unit

  sum.z <- sum(z)
  sum.z.a <- sum(apply(a, 1, sum) > 0)
  obs.sum.z <- sum(apply(apply(y, c(1,2), max), 1, max))
  if(verbose) {
    cat(" Occupied units:                            ", sum.z, "\n",
        "Units with >=1 occupied, surveyed subunit: ", sum.z.a, "\n",
        "Observed number of occupied units:         ", obs.sum.z, "\n", "\n")
  }

  # Output
  return(list(nunits = nunits, nsubunits = nsubunits, nreps = nreps,
      mean.psi = mean.psi, beta.Xpsi = beta.Xpsi, sd.logit.psi = sd.logit.psi,
      psi = psi, mean.theta = mean.theta, theta.time.range = theta.time.range,
      theta.time.effect = theta.time.effect, beta.Xtheta = beta.Xtheta,
      sd.logit.theta = sd.logit.theta, theta = theta, mean.p = mean.p,
      p.time.range = p.time.range, p.time.effect = p.time.effect,
      beta.Xp = beta.Xp, sd.logit.p = sd.logit.p, p = p,
      z = z, a = a, y = y, sum.z = sum.z, obs.sum.z = obs.sum.z, sum.z.a = sum.z.a,
      covA = covA, covB = covB, covC = covC))
}
# ---- File: AHMbook/R/sim3Occ_AHM1_10-10_Simulate_static_3level_occupancy.R ----
# AHM2 section 3.2.2

# ------ Start of definition of the data simulation function ------
simCJS <- function(
    n.occ = 6,         # number of occasions (e.g., years)
    n.marked = 20,     # number of marked individuals per occasion
    phi = 0.7,         # apparent survival probability
    p = 0.4,           # recapture probability
    show.plot = TRUE)  # whether to show plots or not
{
  # -------- start of function code ----------------------
  # This function generates individual capture-histories under a
  # CJS model with possibly time-dependent parameters. It is based
  # on code written by Michael Schaub for chap. 7 of the BPA book.
  # The number of values for interval-specific survival (phi) and
  # time-specific detection (p) must equal the number of occasions
  # (n.occ) minus 1.
  #
  # Written by Marc Kery, Sep 2018
  #
  # Changes by Mike Meredith:
  # Allow n.marked, phi and p to be EITHER scalar OR vector of length n.occ-1
  #   ... and catch errors.
  # Modified code for generating z and ch (gives different values with set.seed)
  # Restore ask and par on exit.

  # Checks and fixes for input data -------------------------
  n.occ <- round(n.occ[1])
  n.marked <- round(n.marked)
  stopifnotLength(n.marked, n.occ-1, allow1=TRUE)
  stopifnotLength(phi, n.occ-1, allow1=TRUE)
  stopifnotProbability(phi)
  stopifnotLength(p, n.occ-1, allow1=TRUE)
  stopifnotProbability(p)
  # --------------------------------------------------------

  # Deal with input
  if(length(n.marked) == 1)
    n.marked <- rep(n.marked, n.occ-1)  # Annual number of newly marked individuals
  n.ind <- sum(n.marked)
  if(length(phi) == 1)
    phi <- rep(phi, n.occ-1)
  if(length(p) == 1)
    p <- rep(p, n.occ-1)

  # Vector (f) with marking occasion (ie, first capture occasion)
  f <- rep(1:length(n.marked), n.marked)

  # Fill the true state matrix (z) and capture-history matrix (ch)
  z <- matrix(NA, nrow = n.ind, ncol = n.occ)  # true states z
  ## Mike says: easier to remove NAs in ch than to insert them into z
  ch <- matrix(0, nrow = n.ind, ncol = n.occ)  # observed capture history
  z[f == 1, 1] <- 1   # animals caught on first occasion definitely alive
  ch[f == 1, 1] <- 1  # ... and definitely caught
  for(t in 2:n.occ) {
    z[, t] <- suppressWarnings(rbinom(n.ind, 1, z[, t-1]*phi[t-1]))  # have they survived?
    ## Mike says: rbinom gives NA if z is NA (and warns); that's what we want here
    ch[, t] <- suppressWarnings(rbinom(n.ind, 1, z[, t]*p[t-1]))     # were they caught?
    ## Mike says: NA is not what we want here, but easy to fix later
    z[f == t, t] <- 1   # animals first caught on occasion t definitely alive
    ch[f == t, t] <- 1  # ... and definitely caught
  }
  ch[is.na(ch)] <- 0  # fix the unwanted NAs

  # Tally up number alive, marked and in study area
  n.alive <- colSums(z, na.rm = TRUE)
  ## Mike is a fan of colSums and rowSums!

  # Visualizations
  if(show.plot){
    # Restore graphical settings on exit
    oldpar <- par(mfrow = c(1, 1), mar = c(5,5,5,3), cex.lab = 1.3, cex.axis = 1.3)
    oldAsk <- devAskNewPage(ask = dev.interactive(orNone=TRUE))
    on.exit({par(oldpar); devAskNewPage(oldAsk)})
    tryPlot <- try( {
      # PLOT 1: trajectories of phi and p
      plot(1:(n.occ-1), phi, type = 'n', ylim = c(0, 1), frame = FALSE,
          main = 'Trajectories of phi and p')
      points(1:(n.occ-1), phi, type = 'b', cex = 2, pch = 16, col = 2)
      points(1:(n.occ-1), p, type = 'b', cex = 2, pch = 16, col = 4, lty = 3)
      legend('top', legend = c('Apparent survival (phi)', 'Recapture (p)'),
          lty = c(1,3), lwd = 2, col = c(2,4), pch = 16, cex = 2,
          inset = c(0, -0.05), bty = 'n', xpd = NA, horiz = TRUE)

      # PLOT 2
      par(mfrow = c(2, 2))
      # Plot the true alive/dead pattern (z)
      mapPalette <- colorRampPalette(c("white", "black"))
      image(x = 1:n.occ, y = 1:n.ind, z = t(z), col = mapPalette(10), axes = TRUE,
          xlab = "Year", ylab = "Individual",
          main = 'z matrix of latent states in the CJS model: \nAlive (black) or dead (white) per individual and occasion')
      # Plot the observed alive/dead pattern (y, or ch)
      image(x = 1:n.occ, y = 1:n.ind, z = t(ch), col = mapPalette(10), axes = TRUE,
          xlab = "Year", ylab = "Individual",
          main = 'Observed data = capture-history matrix ch in the CJS model: \nDetected (black) or not detected (white) per individual and occasion')
      box()
      # Superimpose the two images
      tmp <- z                        # copy z into tmp
      tmp[z == 1 & ch == 0] <- -1.1   # Mark detection errors as -1
      mapPalette <- colorRampPalette(c("blue", "white", "black"))
      image(x = 1:n.occ, y = 1:n.ind, z = t(tmp), col = mapPalette(10), axes = TRUE,
          xlab = "Year", ylab = "Individual",
          main = 'Combopic of z and ch: not in study (white), alive & detected (black), \nalive & undetected (blue) and dead (grey) per individual and occasion')
      # Population size trajectory of marked and alive in study area
      plot(1:n.occ, n.alive, xlab = 'Year', ylab = 'Number alive',
          main = 'Number of marked animals alive and in study area',
          frame = FALSE, type = 'b', cex = 2, pch = 16, ylim = c(0, max(n.alive)))
    }, silent = TRUE)
    if(inherits(tryPlot, "try-error"))
      tryPlotError(tryPlot)
  }

  # Output
  return(list(
    # ---------- arguments input --------------------------
    n.occ = n.occ, n.marked = n.marked, phi = phi, p = p,
    # ------------ generated values -----------------------
    z = z,               # n.ind x n.occ matrix, 1 if alive and in study area
    ch = ch,             # n.ind x n.occ matrix, 1 if captured
    f = f,               # n.ind vector, occasion marked (= first capture)
    n.ind = n.ind,       # scalar, total number of individuals marked
    n.alive = n.alive))  # n.occ vector, number alive and in study area
} # ------ End of definition of the data simulation function ------
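# Example use (illustrative sketch):
if(FALSE) {
  str(dat <- simCJS())             # scalar phi and p
  dat <- simCJS(n.occ = 6, phi = c(0.9, 0.8, 0.7, 0.6, 0.5), p = 0.4,
      show.plot = FALSE)           # interval-specific survival
  head(dat$ch)                     # capture histories
}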
# ---- File: AHMbook/R/simCJS_AHM2_3-2-2.R ----
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM)
# Marc Kery & Andy Royle, Academic Press, 2016.

# simComm - AHM1 section 11.2 p634

# Function to simulate community occupancy or community abundance data
# with random species effects for psi/lambda and p (both including
# effects of one covariate, 'habitat' for psi/lambda and 'wind speed' for p)
# (introduced in AHM1 Section 11.2)

simComm <- function(type = c("det/nondet", "counts"), nsites = 30, nreps = 3,
    nspecies = 100, mean.psi = 0.25, sig.lpsi = 1, mu.beta.lpsi = 0, sig.beta.lpsi = 0,
    mean.lambda = 2, sig.loglam = 1, mu.beta.loglam = 1, sig.beta.loglam = 1,
    mean.p = 0.25, sig.lp = 1, mu.beta.lp = 0, sig.beta.lp = 0, show.plot = TRUE) {
  #
  # Function simulates data from repeated sampling of a metacommunity
  # (or spatially structured community) according to the model of
  # Dorazio & Royle (JASA, 2005) for type = "det/nondet" (this is the default)
  # or under the model of Yamaura et al. (2012) for type = "counts".
  #
  # Occupancy probability (psi) or expected abundance (lambda)
  # can be made dependent on a continuous site covariate 'habitat',
  # while detection probability can be made dependent on an
  # observational covariate 'wind'.
  # Both intercept and slope of the two log- or logistic regressions
  # (for occupancy or expected abundance, respectively, and for detection)
  # are simulated as draws from a normal distribution with
  # mean and standard deviation that can be selected using function arguments.
  #
  # Specifically, the data are simulated under the following linear models:
  #
  # (1) for type = "det/nondet" (i.e., community occupancy)
  # *********************************************************
  # (occupancy (psi) and detection (p) for site i, replicate j and species k)
  # psi[i,k] <- plogis(beta0[k] + beta1[k] * habitat[i])    # Occupancy
  # p[i,j,k] <- plogis(alpha0[k] + alpha1[k] * wind[i,j])   # Detection
  #
  # (2) for type = "counts" (i.e., community count)
  # ************************************************
  # (exp. abundance (lambda) and detection (p) for site i, rep. j and species k)
  # lambda[i,k] <- exp(beta0[k] + beta1[k] * habitat[i])    # E(N)
  # p[i,j,k] <- plogis(alpha0[k] + alpha1[k] * wind[i,j])   # Detection
  #
  # Species-specific heterogeneity in intercepts and slopes is modelled
  # by up to four independent normal distributions (note: no correlation
  # between the intercepts as in Dorazio et al. (2006) or Kery & Royle (2008))
  #
  # (1) for type = "det/nondet" (i.e., community occupancy)
  # *********************************************************
  # beta0 ~ dnorm(qlogis(mean.psi), sig.lpsi)     # Mean and SD of normal distr.
  # beta1 ~ dnorm(mu.beta.lpsi, sig.beta.lpsi)
  # alpha0 ~ dnorm(qlogis(mean.p), sig.lp)
  # alpha1 ~ dnorm(mu.beta.lp, sig.beta.lp)
  #
  # (2) for type = "counts" (i.e., community count)
  # ************************************************
  # beta0 ~ dnorm(log(mean.lambda), sig.loglam)   # Mean and SD of normal distr.
  # beta1 ~ dnorm(mu.beta.loglam, sig.beta.loglam)
  # alpha0 ~ dnorm(qlogis(mean.p), sig.lp)
  # alpha1 ~ dnorm(mu.beta.lp, sig.beta.lp)
  #
  # Community occupancy model code partly based on code by Richard Chandler.
  #
  # Function arguments:
  # *******************
  # type: "det/nondet" or "counts"; choose whether you want to
  #   simulate detection/nondetection data or count data
  # nsites: number of sites
  # nreps: number of replicate samples (occasions or repeated measurements)
  # nspecies: total number of species in the area that is sampled by these sites
  #   (regional species pool)
  #
  # mean.psi: community mean of occupancy probability over all species
  #   in community (probability scale)
  # sig.lpsi: community standard deviation of qlogis(occupancy probability intercept)
  # mu.beta.lpsi: community mean of the effects of 'habitat'
  #   covariate on occupancy probability
  # sig.beta.lpsi: community standard deviation of the effects of
  #   'habitat' covariate on occupancy probability
  #
  # mean.lambda: community mean of expected abundance over all species
  #   in superpopulation
  # sig.loglam: community standard deviation of log(lambda intercept)
  # mu.beta.loglam: community mean of the effects of 'habitat' covariate
  #   on log(lambda)
  # sig.beta.loglam: community standard deviation of the effects
  #   of 'habitat' covariate on expected abundance
  #
  # mean.p: community mean of detection probability over all species
  #   in superpopulation (probability scale)
  # sig.lp: community standard deviation of qlogis(detection probability intercept)
  # mu.beta.lp: community mean of the effects of 'wind' covariate
  #   on detection probability
  # sig.beta.lp: community standard deviation of the effects of 'wind' covariate
  #   on detection probability
  # show.plot: choose whether to show plots or not. Set to FALSE when
  #   using function in simulations.

  # Code for simulating binary detection/nondetection data
  # (according to a community occupancy model)
  if(FALSE) x <- NULL  # A kludge to cope with 'curve's odd way of using 'x'

  # Checks and fixes for input data -----------------------------
  nsites <- round(nsites[1])
  nreps <- round(nreps[1])
  nspecies <- round(nspecies[1])
  stopifnotProbability(mean.psi)
  stopifNegative(sig.lpsi)
  stopifNegative(sig.beta.lpsi)
  stopifNegative(mean.lambda)
  stopifNegative(sig.loglam)
  stopifNegative(sig.beta.loglam)
  stopifnotProbability(mean.p)
  stopifNegative(sig.lp)
  stopifNegative(sig.beta.lp)
  # ----------------------------------------------------------------

  type <- match.arg(type)

  if(show.plot){
    # Restore graphical settings on exit -------------------------
    oldpar <- par("mfrow", "mar", "cex.axis", "cex.lab")
    oldAsk <- devAskNewPage(ask = dev.interactive(orNone=TRUE))
    on.exit({par(oldpar); devAskNewPage(oldAsk)})
    # ------------------------------------------------------------
  }

  if(type=="det/nondet"){
    # Prepare structures to hold data
    y.all <- y.obs <- p <- array(NA, c(nsites, nreps, nspecies))
    dimnames(y.all) <- dimnames(y.obs) <- dimnames(p) <-
        list(paste("site", 1:nsites, sep=""),
             paste("rep", 1:nreps, sep=""),
             paste("sp", 1:nspecies, sep=""))
    z <- psi <- matrix(NA, nsites, nspecies)
    dimnames(z) <- dimnames(psi) <- list(paste("site", 1:nsites, sep=""),
        paste("sp", 1:nspecies, sep=""))
    detected.at.all <- rep(NA, nspecies)

    # Create covariates 'habitat' and 'wind'
    habitat <- sort(rnorm(nsites))   # Note 'habitat gradient' due to sorting
    wind <- matrix(rnorm(nsites * nreps), ncol=nreps)

    # Draw species-specific intercepts and slopes from their normal distributions
    # Build up linear predictors for occupancy and detection
    # qlogis(1) returns Inf, replace with 500
    mu.lpsi <- ifelse(mean.psi == 1, 500, qlogis(mean.psi))
    mu.lp <- ifelse(mean.p == 1, 500, qlogis(mean.p))
    beta0 <- rnorm(nspecies, mu.lpsi, sig.lpsi)            # occupancy intercept
    beta1 <- rnorm(nspecies, mu.beta.lpsi, sig.beta.lpsi)  # occupancy slope on habitat
    alpha0 <- rnorm(nspecies, mu.lp, sig.lp)               # detection intercept
    alpha1 <- rnorm(nspecies, mu.beta.lp, sig.beta.lp)     # detection slope on wind
    for(k in 1:nspecies){
      psi[,k] <- plogis(beta0[k] + beta1[k] * habitat)
      for(j in 1:nreps){
        p[,j,k] <- plogis(alpha0[k] + alpha1[k] * wind[,j])
      }
    }

    # Distribute species over sites (simulate true state)
    for(k in 1:nspecies){
      z[,k] <- rbinom(nsites, 1, psi[,k])
    }
    occurring.in.sample <- apply(z, 2, max)  # Presence/absence at study sites

    # Measurement of presence/absence (simulate observation)
    for(k in 1:nspecies) {
      for(i in 1:nsites){
        for(j in 1:nreps) {
          y.all[i,j,k] <- rbinom(1, z[i,k], p[i,j,k])
        }
      }
      # detected.at.all[k] <- if(any(y.all[,,k]>0)) TRUE else FALSE
      detected.at.all[k] <- any(y.all[, , k] > 0)
    }
    y.obs <- y.all[,,detected.at.all]         # Drop species never detected
    detected.at.site <- apply(y.obs>0, c(1,3), any)
    y.sum.all <- apply(y.all, c(1,3), sum)    # Detection frequency for all species
    y.sum.obs <- y.sum.all[,detected.at.all]  # Detection frequency for obs. species
    z.obs <- apply(y.all, c(1,3), max)        # Observed presence/absence matrix
    missed.sites <- z - z.obs                 # Sites where species missed
    Ntotal.fs <- sum(occurring.in.sample)     # Number of species in finite-sample
    Ntotal.obs <- sum(detected.at.all)        # Observed species richness (all sites)
    S.true <- apply(z, 1, sum)                # Vector of true local richness
    S.obs <- apply(z.obs, 1, sum)             # Vector of observed local richness

    # Two panels of plots
    # (1) Species-specific and community responses of occupancy to habitat
    # (2) Species-specific and community responses of detection to wind
    if(show.plot){
      par(mfrow = c(1,2), mar = c(5,5,5,3), cex.axis = 1.3, cex.lab = 1.3)
      tryPlot <- try( {
        # (1) Species-specific and community responses of occupancy to 'habitat'
        curve(plogis(beta0[1] + beta1[1] * x), -2, 2,
            main = "Species-specific (black) and community (red) \n response of occupancy to habitat",
            xlab = "Habitat", ylab = "Occupancy probability (psi)", ylim = c(0,1))
        for(k in 2:nspecies){
          curve(plogis(beta0[k] + beta1[k] * x), -2, 2, add = TRUE)
        }
        curve(plogis(mu.lpsi + mu.beta.lpsi * x), -2, 2, col = "red", lwd = 3, add = TRUE)
        # (2) Species-specific and community responses of detection to 'wind'
        curve(plogis(alpha0[1] + alpha1[1] * x), -2, 2,
            main = "Species-specific (black) and community (red) \n response of detection to wind",
            xlab = "Wind", ylab = "Detection probability (p)", ylim = c(0,1))
        for(k in 2:nspecies){
          curve(plogis(alpha0[k] + alpha1[k] * x), -2, 2, add = TRUE)
        }
        curve(plogis(mu.lp + mu.beta.lp * x), -2, 2, col = "red", lwd = 3, add = TRUE)

        # More plots
        # (3) True presence/absence
        # (4) Observed detection frequencies
        # (5) Sites where a species was missed
        # (6) True and observed histogram of site-specific species richness
        par(mfrow = c(2,2), cex.axis = 1.3, cex.lab = 1.3)
        mapPalette1 <- colorRampPalette(c("white", "black"))
        mapPalette2 <- colorRampPalette(c("white", "yellow", "orange", "red"))
        # (3) True presence/absence matrix (z) for all species
        # mapPalette was 2 before
        image(x = 1:nspecies, y = 1:nsites, z = t(z), col = mapPalette1(4),
            main = paste("True presence/absence (z) matrix\n (finite-sample N species =",
                Ntotal.fs, ")"),
            frame = TRUE, xlim = c(0, nspecies+1), ylim = c(0, nsites+1),
            xlab = "Species", ylab = "Sites")
        # (4) Observed detection frequency for all species
        image(x = 1:nspecies, y = 1:nsites, z = t(y.sum.all), col = mapPalette2(100),
            main = paste("Observed detection frequencies"),
            xlim = c(0, nspecies+1), ylim = c(0, nsites+1),
            frame = TRUE, xlab = "Species", ylab = "Sites")
        # (5) Sites where a species was missed
        image(x = 1:nspecies, y = 1:nsites, z = t(missed.sites), col = mapPalette1(2),
            main = paste("Matrix of missed presences\n (obs. N species =",
                Ntotal.obs, ")"),
            frame = TRUE, xlim = c(0, nspecies+1), ylim = c(0, nsites+1),
            xlab = "Species", ylab = "Sites")
        # (6) True and observed distribution of site-specific species richness
        # plot(table(S.true), col = "red", xlab = "Number of species per site",
        #     xlim = c(0, max(S.true)), ylab = "Frequency",
        #     main = "True (red) vs. observed (blue) \n number of species per site")
        # points(table(S.obs+(nspecies/100)), col = "blue")
        histCount(S.obs, S.true, xlab = "Number of species per site",
            main = "True (red) vs. observed (blue) \n number of species per site")
        # See file "histCount_helper.R" for details of this function.
      }, silent = TRUE)
      if(inherits(tryPlot, "try-error"))
        tryPlotError(tryPlot)
    }

    # Output
    return(list(
      # input arguments
      type=type, nsites=nsites, nreps=nreps, nspecies=nspecies,
      mean.psi=mean.psi, mu.lpsi=mu.lpsi, sig.lpsi=sig.lpsi,
      mu.beta.lpsi=mu.beta.lpsi, sig.beta.lpsi=sig.beta.lpsi,
      mean.p=mean.p, mu.lp=mu.lp, sig.lp=sig.lp,
      mu.beta.lp=mu.beta.lp, sig.beta.lp=sig.beta.lp,
      # generated values
      habitat=habitat, wind=wind, psi=psi, p=p, z=z, z.obs = z.obs,
      y.all=y.all, y.obs=y.obs, y.sum.all=y.sum.all, y.sum.obs=y.sum.obs,
      Ntotal.fs = Ntotal.fs, Ntotal.obs = Ntotal.obs,
      S.true = S.true, S.obs = S.obs))
  } # endif(type=="det/nondet")

  # Code for simulating community abundance data
  # (according to a community abundance model)
  if(type=="counts"){
    # Prepare structures to hold data
    y.all <- y.obs <- p <- array(NA, c(nsites, nreps, nspecies))
    dimnames(y.all) <- dimnames(y.obs) <- dimnames(p) <-
        list(paste("site", 1:nsites, sep=""),
             paste("rep", 1:nreps, sep=""),
             paste("sp", 1:nspecies, sep=""))
    N <- lambda <- matrix(NA, nsites, nspecies)
    dimnames(N) <- dimnames(lambda) <- list(paste("site", 1:nsites, sep=""),
        paste("sp", 1:nspecies, sep=""))
    detected.at.all <- rep(NA, nspecies)

    # Create covariates 'habitat' and 'wind'
    habitat <- sort(rnorm(nsites))   # Note 'habitat gradient' due to sorting
    wind <- matrix(rnorm(nsites * nreps), ncol=nreps)

    # Draw species-specific intercepts and slopes from their normal distributions
    # Build up linear predictors for abundance and detection
    mu.loglam <- log(mean.lambda)
    mu.lp <- ifelse(mean.p == 1, 500, qlogis(mean.p))
    beta0 <- rnorm(nspecies, mu.loglam, sig.loglam)            # lambda intercept
    beta1 <- rnorm(nspecies, mu.beta.loglam, sig.beta.loglam)  # lambda slope on habitat
    alpha0 <- rnorm(nspecies, mu.lp, sig.lp)                   # detection intercept
    alpha1 <- rnorm(nspecies, mu.beta.lp, sig.beta.lp)         # detection slope on wind
    for(k in 1:nspecies){
      lambda[,k] <- exp(beta0[k] + beta1[k] * habitat)
      for(j in 1:nreps){
        p[,j,k] <- plogis(alpha0[k] + alpha1[k] * wind[,j])
      }
    }

    # Distribute species over sites (simulate true abundance state)
    for(k in 1:nspecies){
      N[,k] <- rpois(nsites, lambda[,k])
    }
    tmp <- apply(N, 2, sum)
    occurring.in.sample <- as.numeric(tmp > 0)  # Presence/absence in study area

    # Measurement of abundance (simulate counts)
    for(k in 1:nspecies) {
      for(i in 1:nsites){
        for(j in 1:nreps) {
          y.all[i,j,k] <- rbinom(1, N[i,k], p[i,j,k])
        }
      }
      detected.at.all[k] <- any(y.all[,,k] > 0)
    }
    y.obs <- y.all[,,detected.at.all]       # Drop species never detected
    detected.at.site <- apply(y.obs>0, c(1,3), any)
    ymax.obs <- apply(y.all, c(1,3), max)   # Observed max count matrix
    Ntotal.fs <- sum(occurring.in.sample)   # Number of species in finite-sample
    Ntotal.obs <- sum(detected.at.all)      # Observed species richness (all sites)

    # Two panels of plots
    # (1) Species-specific and community responses of lambda to habitat
    # (2) Species-specific and community responses of detection to wind
    if(show.plot){
      par(mfrow = c(1,2), mar = c(5,5,5,3), cex.axis = 1.3, cex.lab = 1.3)
      tryPlot <- try( {
        # (1) Species-specific and community responses of lambda to 'habitat'
        curve(exp(beta0[1] + beta1[1] * x), -2, 2,
            main = "Species-specific (black) and community (red) \n response of lambda to habitat",
            xlab = "Habitat", ylab = "Expected abundance (lambda)")
        for(k in 2:nspecies){
          curve(exp(beta0[k] + beta1[k] * x), -2, 2, add = TRUE)
        }
        curve(exp(mu.loglam + mu.beta.loglam * x), -2, 2, col = "red", lwd = 3, add = TRUE)
        # (2) Species-specific and community responses of detection to 'wind'
        curve(plogis(alpha0[1] + alpha1[1] * x), -2, 2,
            main = "Species-specific (black) and community (red) \n response of detection to wind",
            xlab = "Wind", ylab = "Detection probability (p)", ylim = c(0,1))
        for(k in 2:nspecies){
          curve(plogis(alpha0[k] + alpha1[k] * x), -2, 2, add = TRUE)
        }
        curve(plogis(mu.lp + mu.beta.lp * x), -2, 2, col = "red", lwd = 3, add = TRUE)

        # More plots
        # (3) True abundance N (log10 + 1)
        # (4) Observed detection frequencies (log10 + 1)
        # (5) Ratio of max count to true N
        # (6) log(max count) vs. log(true N)
        par(mfrow = c(2,2), mar = c(5,6,4,2), cex.axis = 1.3, cex.lab = 1.3)
        mapPalette <- colorRampPalette(c("yellow", "orange", "red"))
        # (3) True abundance matrix (log10(N+1)) for all species
        # mapPalette was 2 before
        image(x = 1:nspecies, y = 1:nsites, z = log10(t(N)+1), col = mapPalette(100),
            main = paste("True log(abundance) (log10(N)) matrix\n (finite-sample N species =",
                sum(occurring.in.sample), ")"),
            frame = TRUE, xlim = c(0, nspecies+1), zlim = c(0, log10(max(N))),
            xlab = "Species", ylab = "Sites")
        # (4) Observed maximum counts for all species
        image(x = 1:nspecies, y = 1:nsites, z = log10(t(ymax.obs)+1), col = mapPalette(100),
            main = paste("Observed maximum counts (log10 + 1)"),
            xlim = c(0, nspecies+1), frame = TRUE, xlab = "Species", ylab = "Sites",
            zlim = c(0, log10(max(N))))
        # (5) Ratio of max count to true N
        ratio <- ymax.obs/N
        ratio[is.nan(ratio)] <- 1
        image(x = 1:nspecies, y = 1:nsites, z = t(ratio), col = mapPalette(100),
            main = paste("Ratio of max count to true abundance (N)"),
            xlim = c(0, nspecies+1), frame = TRUE, xlab = "Species", ylab = "Sites",
            zlim = c(0, 1))
        # (6) Observed max count vs. true N, both on the log10(x+1) scale
        lims <- c(0, log10(max(N+1)))
        plot(log10(N+1), log10(ymax.obs+1), xlab = "True abundance (log10(N+1))",
            ylab = "Observed max count \n(log10(max+1))", xlim = lims, ylim = lims,
            main = "Observed vs. true N (log10 scale)")
        abline(0, 1)
      }, silent = TRUE)
      if(inherits(tryPlot, "try-error"))
        tryPlotError(tryPlot)
    }

    # Output
    return(list(
      # input arguments
      type=type, nsites=nsites, nreps=nreps, nspecies=nspecies,
      mean.lambda=mean.lambda, mu.loglam=mu.loglam, sig.loglam=sig.loglam,
      mu.beta.loglam=mu.beta.loglam, sig.beta.loglam=sig.beta.loglam,
      mean.p=mean.p, mu.lp=mu.lp, sig.lp=sig.lp,
      mu.beta.lp=mu.beta.lp, sig.beta.lp=sig.beta.lp,
      # generated values
      habitat=habitat, wind=wind, lambda=lambda, p=p, N=N,
      y.all=y.all, y.obs=y.obs, ymax.obs=ymax.obs,
      Ntotal.fs=Ntotal.fs, Ntotal.obs=Ntotal.obs))
  } # endif(type=="counts")
}
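# Example use (illustrative sketch):
if(FALSE) {
  str(dat <- simComm())                           # community occupancy data
  dat <- simComm(type = "counts", nspecies = 50, show.plot = FALSE)
  dat$Ntotal.obs / dat$Ntotal.fs                  # proportion of species detected
}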
# ---- File: AHMbook/R/simComm_AHM1_11-2_Simulate_community_data.R ----
# 16.2 A general function to simulate data under the DCM model

simDCM <- function(nspecies = 50, nsites = 100, nsurveys = 3, nyears = 10,
    mean.psi1 = 0.4, sig.lpsi1 = 1, mu.beta.lpsi1 = 0, sig.beta.lpsi1 = 0,
    range.mean.phi = c(0.8, 0.8), sig.lphi = 1, mu.beta.lphi = 0, sig.beta.lphi = 0,
    range.mean.gamma = c(0.2, 0.2), sig.lgamma = 1, mu.beta.lgamma = 0, sig.beta.lgamma = 0,
    range.mean.p = c(0.5, 0.5), sig.lp = 1, mu.beta.lp = 0, sig.beta.lp = 0,
    range.beta1.survey = c(0, 0), range.beta2.survey = c(0, 0),
    trend.sd.site = c(0, 0), trend.sd.survey = c(0, 0),
    show.plot = TRUE, verbose = TRUE) {
  #
  # Written by Marc Kery, 28 Nov 2016
  #
  # Function is based on the dynocc function 'simDynocc' (AHM2, Chap 15) and
  # on the community occupancy function 'simComm' (AHM1, Chap 11).
  #
  # Function to simulate detection/nondetection data under a general
  # dynamic community (site-occ) model, including:
  #  * annual variation in the probabilities of patch persistence, colonization
  #    and detection, specified by the bounds of a uniform distribution.
  #  * species heterogeneity around the means, specified by the SD of a normal
  #    distribution and expressed on the logit scale.
  #  * one covariate is allowed to affect each parameter (a site covariate for
  #    psi1, a site-year covariate for phi and gamma and a site-year-rep
  #    covariate for p). Each covariate effect is allowed to differ among
  #    species, again according to a logit-normal model of heterogeneity.
  #  * Additional detection heterogeneity at the site or the survey level,
  #    with the possibility of a temporal trend in this heterogeneity. E.g.,
  #    an annual trend in detection heterogeneity at the site or
  #    the survey level is specified by the value in the first and the last year.
  #    Hence, trend.sd.site = c(0, 1) will result in a linear trend in the
  #    magnitude of site heterogeneity in detection from 0 in the first year to
  #    1 in the last year.
  #  * Additional detection heterogeneity that varies over the survey (= occasion)
  #    according to a quadratic effect of occasion number (to model phenology of
  #    an insect species for instance).
  #  * These last two types of detection heterogeneity are not (yet) allowed
  #    to be species-specific.
  #
  # Function arguments:
  # -------------------
  # *** Sample size arguments ***
  # nspecies - Number of species (typically called N in AHM book)
  # nsites - Number of sites (M)
  # nsurveys - Number of replicate surveys within a year (= season) (J)
  # nyears - Number of years (or 'seasons') (T)
  # -------------------
  # *** Arguments for mean parameters for the intercepts ***
  # mean.psi1 - average occupancy probability in first year
  # range.mean.p - bounds of uniform distribution from which annual p drawn
  # range.mean.phi and range.mean.gamma - same for persistence and colonization prob.
  # -------------------
  # *** Arguments for mean parameters for the slopes ***
  # mu.beta.lpsi1, mu.beta.lphi, mu.beta.lgamma, mu.beta.lp - coefficients of
  #   covariates in probabilities of initial occupancy, persistence,
  #   colonization and detection. These are the means of the normal
  #   distributions, from which species-specific slopes are drawn
  # -------------------
  # *** Args. for species-specific heterogeneity in intercepts and slopes ***
  # sig.lpsi1: sd of the normal distribution from which species-specific occupancy
  #   intercepts are drawn (centered on logit(mean.psi1)), on logit scale
  # sig.beta.lpsi1: sd of the normal distribution from which species-specific
  #   slopes are drawn (centered on mu.beta.lpsi1)
  # sig.lphi: sd of the normal distribution from which species-specific persistence
  #   intercepts are drawn (centered on logit(mean.phi), which are year-specific),
  #   on logit scale
  # sig.beta.lphi: sd of the normal distribution from which species-specific
  #   persistence slopes are drawn (centered on mu.beta.lphi)
  # sig.lgamma: sd of the normal distribution from which species-specific
  #   colonization intercepts are drawn (centered on logit(mean.gamma),
  #   which are year-specific), on logit scale
  # sig.beta.lgamma: sd of the normal distribution from which species-specific
  #   colonization slopes are drawn (centered on mu.beta.lgamma)
  # sig.lp: sd of the normal distribution from which species-specific
  #   detection intercepts are drawn (centered on logit(mean.p),
  #   which are year-specific), on logit scale
  # sig.beta.lp: sd of the normal distribution from which species-specific
  #   detection slopes are drawn (centered on mu.beta.lp)
  # -------------------
  # *** Args. for detection heterogeneity among sites and surveys
  #     (this part of the model is NOT species-specific) ***
  # trend.sd.site: sd of normal distribution to model logit-normal noise in p
  #   at the site level in the first and the last year of the simulation.
  # trend.sd.survey: sd of normal distribution to model logit-normal noise in p
  #   at the site/year/rep = 'survey' level, in the first and the last year
  # For these sd arguments, if the two values in the range are the
  # same, a constant value is assumed over time, while if they are different,
  # a linear trend is assumed over time.
  # -------------------
  # *** Args. for detection heterogeneity among occasions within a season
  #     (this part of model again NOT species-specific) ***
  # range.beta1.survey: range of the annual variation in the linear effect
  #   of survey (i.e., of occasion 1:nsurveys) on the product of
  #   availability and detection
  # range.beta2.survey: the same for the quadratic effect of survey
  #
  # show.plot: if TRUE, plots are produced. Usually set to FALSE when running sims.

  # Checks and fixes for input data -----------------------------
  nspecies <- round(nspecies[1])
  nsites <- round(nsites[1])
  nsurveys <- round(nsurveys[1])
  nyears <- round(nyears[1])
  stopifnotGreaterthan(nyears, 1)
  stopifnotProbability(mean.psi1)
  stopifNegative(sig.lpsi1, allowZero=TRUE)
  # mu.beta.lpsi1
  stopifNegative(sig.beta.lpsi1, allowZero=TRUE)
  stopifnotProbability(range.mean.phi)   # bounds
  stopifNegative(sig.lphi, allowZero=TRUE)
  stopifNegative(sig.beta.lphi, allowZero=TRUE)
  stopifnotProbability(range.mean.gamma) # bounds
  stopifNegative(sig.lgamma, allowZero=TRUE)
  stopifNegative(sig.beta.lgamma, allowZero=TRUE)
  stopifnotProbability(range.mean.p)     # bounds
  stopifNegative(sig.lp, allowZero=TRUE)
  stopifNegative(sig.beta.lp, allowZero=TRUE)
  # --------------------------------------------

  # Set up arrays needed
  spec <- 1:nspecies    # Species
  site <- 1:nsites      # Sites
  year <- 1:nyears      # Years
  # visit <- 1:nsurveys # Visit
  # month <- 1:nsurveys # Months (= surveys)
  survey <- 1:nsurveys  # Surveys (= occasions)
  psi <- muZ <- z <- array(dim = c(nsites, nyears, nspecies),
      dimnames = list(paste('Site', site, sep = ''),
          paste('Year', year, sep = ''),
          paste('Spec', spec, sep = '')))   # Occupancy, occurrence
  phi <- gamma <- array(NA, dim = c(nsites, (nyears-1), nspecies),
      dimnames = list(paste('Site', site, sep = ''),
          paste('Year', year[-nyears], sep = ''),
          paste('Spec', spec, sep = '')))   # Survival, colonisation
  y <- p <- array(NA, dim = c(nsites, nsurveys, nyears, nspecies),
      dimnames = list(paste('Site', site, sep = ''),
          paste('Survey', survey, sep = ''),
          paste('Year', year, sep = ''),
          paste('Spec', spec, sep = '')))   # Det. hist and p

  # Create covariates (same for all species)
  Xpsi1 <- matrix(runif(nsites, -2, 2), ncol = 1,
      dimnames = list(paste('Site', site, sep=''), NULL))  # Site covariate for psi1
  Xphi <- array(runif(nsites*nyears, -2, 2), dim = c(nsites, nyears),
      dimnames = list(paste('Site', site, sep=''),
          paste('Year', year, sep='')))                    # Yearly-site cov
  Xgamma <- array(runif(nsites*nyears, -2, 2), dim = c(nsites, nyears),
      dimnames = list(paste('Site', site, sep=''),
          paste('Year', year, sep='')))                    # Yearly-site cov
  Xp <- array(runif(nsites*nsurveys*nyears, -2, 2), dim = c(nsites, nsurveys, nyears),
      dimnames = list(paste('Site', site, sep = ''),
          paste('Survey', survey, sep = ''),
          paste('Year', year, sep = '')))                  # Observation cov.

  # (1) Simulate all parameter values
  # (a) State process parameters
  # initial occupancy for all species
  mu.lpsi1 <- ifelse(mean.psi1 == 1, 500, qlogis(mean.psi1))
  beta0.lpsi <- rnorm(nspecies, mu.lpsi1, sig.lpsi1)           # initial occupancy intercept
  beta1.lpsi <- rnorm(nspecies, mu.beta.lpsi1, sig.beta.lpsi1) # occ. slope on Xpsi1
  for(s in 1:nspecies){
    psi[,1,s] <- plogis(beta0.lpsi[s] + beta1.lpsi[s] * Xpsi1) # psi1
  }

  # persistence and colonization for all species
  beta0.lphi <- beta0.lgamma <- array(dim = c(nspecies, nyears-1))
  mean.phi <- runif(n = nyears-1, min = min(range.mean.phi), max = max(range.mean.phi))
  mean.gamma <- runif(n = nyears-1, min = min(range.mean.gamma), max = max(range.mean.gamma))
  mu.lphi <- ifelse(mean.phi == 1, 500, qlogis(mean.phi))
  mu.lgamma <- ifelse(mean.gamma == 1, 500, qlogis(mean.gamma))
  eps.lphi <- rnorm(nspecies, 0, sig.lphi)     # species effect in logit(phi) intercept
  eps.lgamma <- rnorm(nspecies, 0, sig.lgamma) # spec effect in logit(gam) intercept
  for(t in 1:(nyears-1)){
    beta0.lphi[,t] <- mu.lphi[t] + eps.lphi         # logit(phi) intercept
    beta0.lgamma[,t] <- mu.lgamma[t] + eps.lgamma   # logit(gamma) intercept
  }
  beta1.lphi <- rnorm(nspecies, mu.beta.lphi, sig.beta.lphi)       # slope of logit(phi) on Xphi
  beta1.lgamma <- rnorm(nspecies, mu.beta.lgamma, sig.beta.lgamma) # slope of logit(gamma) on Xgamma
  for(s in 1:nspecies){
    for(t in 1:(nyears-1)){
      phi[,t,s] <- plogis(beta0.lphi[s, t] + beta1.lphi[s] * Xphi[,t])
      gamma[,t,s] <- plogis(beta0.lgamma[s, t] + beta1.lgamma[s] * Xgamma[,t])
    }
  }

  # (b) Observation process parameters
  beta0.lp <- array(dim = c(nspecies, nyears))
  mean.p <- runif(n = nyears, min = min(range.mean.p), max = max(range.mean.p))
  mu.lp <- ifelse(mean.p == 1, 500, qlogis(mean.p))
  eps.lp <- rnorm(nspecies, 0, sig.lp)   # species effect in logit(p) intercept
  for(t in 1:nyears){
    beta0.lp[,t] <- mu.lp[t] + eps.lp    # logit(p) intercept
  }
  beta1.lp <- rnorm(nspecies, mu.beta.lp, sig.beta.lp)  # slope of logit(p) on Xp
  beta1 <- runif(n = nyears, min = min(range.beta1.survey), max = max(range.beta1.survey))
  beta2 <- runif(n = nyears, min = min(range.beta2.survey), max = max(range.beta2.survey))
  sd.site <- seq(from = trend.sd.site[1], to = trend.sd.site[2], length.out = nyears)
  sd.survey <- seq(from = trend.sd.survey[1], to = trend.sd.survey[2], length.out = nyears)

  # Create site and survey random effects in detection, drawn with the
  # year-specific SDs so that the linear trend specified by trend.sd.site and
  # trend.sd.survey applies: eps1 is nsites x nyears, eps2 is nsurveys x nyears
  eps1 <- matrix(rnorm(nsites * nyears, mean = 0, sd = rep(sd.site, each = nsites)),
      nrow = nsites, ncol = nyears)       # Site random eff.
  eps2 <- matrix(rnorm(nsurveys * nyears, mean = 0, sd = rep(sd.survey, each = nsurveys)),
      nrow = nsurveys, ncol = nyears)     # Survey random eff.

  for(s in 1:nspecies){
    for(t in 1:nyears){       # Years
      for(j in 1:nsurveys){   # Occasions interpreted as surveys
        p[,j,t,s] <- plogis(beta0.lp[s, t] + beta1.lp[s] * Xp[,j,t] +
            eps1[,t] + eps2[j,t] +
            beta1[t] * (j - (nsurveys/2)) + beta2[t] * (j - (nsurveys/2))^2)
      }
    }
  }

  # (2) Simulate the true system dynamics (state process)
  # First year
  for(s in 1:nspecies){
    z[,1,s] <- rbinom(nsites, 1, psi[,1,s])  # Initial occurrence state
  }
  for(s in 1:nspecies){   # Loop over species
    for(t in 2:nyears){   # Loop over years
      muZ[,t,s] <- z[,t-1,s] * phi[,t-1,s] + (1-z[,t-1,s]) * gamma[,t-1,s]
      z[,t,s] <- rbinom(nsites, 1, muZ[,t,s])
    }
  }

  # (3) Simulate observation process to get the observed data
  for(s in 1:nspecies){      # Loop over species
    for(t in 1:nyears){      # Loop over years
      for(j in 1:nsurveys){  # Loop over replicates
        prob <- z[,t,s] * p[,j,t,s]  # zero out p for unoccupied sites
        y[,j,t,s] <- rbinom(nsites, 1, prob)
      }
    }
  }

  # (4) Compute annual population occupancy
  for(s in 1:nspecies){   # Loop over species
    for(t in 2:nyears){
      psi[,t,s] <- psi[,t-1,s] * phi[,t-1,s] + (1-psi[,t-1,s]) * gamma[,t-1,s]
    }
  }

  # Compute some derived stuff
  n.occ <- apply(z, 2:3, sum)         # Number of occupied sites
  psi.fs <- apply(z, 2:3, mean)       # Finite-sample occupancy proportion
  mean.psi <- apply(psi, 2:3, mean)   # Average psi over sites
  z.obs <- apply(y, c(1,3,4), max)    # Observed value of z matrix
  n.occ.obs <- apply(z.obs, 2:3, sum) # Observed number of occupied sites
  psi.obs <- apply(z.obs, 2:3, mean)  # Observed occupancy (finite sample)

  # Total number of species that occur in the sampled sites
  tmp1 <- apply(z, 2:3, max)            # True presence per year and species
  nyears.pres <- apply(tmp1, 2, sum)    # Number of years when species present
  nspecies.pres <- sum(nyears.pres > 0) # Number of species ever present
  # Total number of species that were detected anywhere in the sampled sites
  tmp2 <- apply(z.obs, 2:3, max)        # Observed presence per year and species
  nyears.det <- apply(tmp2, 2, sum)     # Number of years when species detected
  nspecies.det <- sum(nyears.det > 0)   # Number of species ever detected

  # Print out number of occurring and detected species
  if(verbose) {
    cat(paste("\n *** Number of species ever occurring:", nspecies.pres,
        "\n *** Number of species ever detected:", nspecies.det,
        "\n *** Avg. number of years of occurrence:", round(mean(nyears.pres), 3),
        "\n *** Avg. number of years with detection:", round(mean(nyears.det), 3),
        "\n\n"))
  }

  # Compute the average survey-level product of availability and detection
  # (ignoring the other terms in the model for detection)
  p.survey <- array(NA, dim = c(nsurveys, nyears))
  for(t in 1:nyears){   # Years
    p.survey[,t] <- plogis(mean(beta0.lp[, t]) +
        beta1[t] * (survey - (nsurveys/2)) + beta2[t] * (survey - (nsurveys/2))^2)
  }

  # (5) Plots of stuff
  if(show.plot){
    oldpar <- par(mfrow = c(3, 2), mar = c(5,5,4,3), cex.lab = 1.2)
    oldAsk <- devAskNewPage(ask = dev.interactive(orNone = TRUE))
    on.exit({par(oldpar) ; devAskNewPage(oldAsk)})
    tryPlot <- try( {
      # Get predicted covariate relationships and plot them in single graph
      pred.cov <- seq(-2, 2, length.out = 100)
      psi.pred <- phi.pred <- gamma.pred <- p.pred <-
          array(dim = c(length(pred.cov), nspecies))
      for(s in 1:nspecies){
        psi.pred[,s] <- plogis(beta0.lpsi[s] + beta1.lpsi[s] * pred.cov)
        phi.pred[,s] <- plogis(mean(beta0.lphi[s,]) + beta1.lphi[s] * pred.cov)
        gamma.pred[,s] <- plogis(mean(beta0.lgamma[s,]) + beta1.lgamma[s] * pred.cov)
        p.pred[,s] <- plogis(mean(beta0.lp[s,]) + beta1.lp[s] * pred.cov)
      }
      matplot(pred.cov, psi.pred, type = 'l', lty = 1, ylim = c(0,1), lwd = 2,
          main = paste('Occupancy (', nspecies, ' species, ', nsites, ' sites)', sep = ''),
          xlab = 'Covariate', ylab = 'Initial occupancy prob.', las = 1, frame = FALSE)
      matplot(pred.cov, phi.pred, type = 'l', lty = 1, ylim = c(0,1), lwd = 2,
          main = paste('Persistence (averaged over years,\n', nspecies, ' species, ',
              nsites, ' sites)', sep = ''),
          xlab = 'Covariate', ylab = 'Persistence prob.', las = 1, frame = FALSE)
      matplot(pred.cov, gamma.pred, type = 'l', lty = 1, ylim = c(0,1), lwd = 2,
          main = paste('Colonization (averaged over years,\n', nspecies, ' species, ',
              nsites, ' sites)', sep = ''),
          xlab = 'Covariate', ylab = 'Colonization prob.', las = 1, frame = FALSE)
      matplot(pred.cov, p.pred, type = 'l', lty = 1, ylim = c(0,1), lwd = 2,
          main = paste('Detection (averaged over years,\n', nspecies, ' species, ',
              nsites, ' sites)', sep = ''),
          xlab = 'Covariate', ylab = 'Detection prob.', las = 1, frame = FALSE)
      # Plot the average survey-level product of availability and detection
      # (ignoring the other terms in the model for detection)
      matplot(survey, p.survey, type = 'l', lty = 1, lwd = 2,
          main = 'Seasonal pattern in p over the years \n(only survey terms, same for all species)',
          xlab = 'Survey', ylab = 'Detection probability', ylim = c(0,1))
      # Histo of detection
      hist(p, col = 'grey', breaks = 50, xlim = c(0,1),
          main = 'Detection probability p\n (all species, sites etc.)')
      # Annual (and species-specific) variation in persistence, colonisation and detection
      matplot(t(plogis(beta0.lphi)), type = 'l', lty = 1, lwd = 2, ylim = c(0,1),
          xlab = 'Year', ylab = 'Persistence intercept',
          main = 'Average persistence per year and species', las = 1, frame = FALSE)
      matplot(t(plogis(beta0.lgamma)), type = 'l', lty = 1, lwd = 2, ylim = c(0,1),
          xlab = 'Year', ylab = 'Colonization intercept',
          main = 'Average colonization per year and species', las = 1, frame = FALSE)
      matplot(t(plogis(beta0.lp)), type = 'l', lty = 1, lwd = 2, ylim = c(0,1),
          xlab = 'Year', ylab = 'Detection intercept',
          main = 'Average detection per year and species', las = 1, frame = FALSE)
      # Histo of true mean occupancy probability (all species and years)
      hist(mean.psi, col = 'grey', breaks = 50, xlim = c(0,1),
          main = 'Mean occupancy probability psi\n (all species and years)')
      # Plot realised and apparent proportion of occupied sites
      matplot(year, mean.psi, type = "l", lty = 1,
          xlab = "Year", ylab = "Occupancy prob.",
          xlim = c(0, nyears+1), ylim = c(0,1), lwd = 2, frame.plot = FALSE, las = 1,
          main = paste('True occupancy (', nspecies, ' species, ',
              nsites, ' sites)', sep = ''))
      matplot(year, psi.obs, type = "l", lty = 1,
          xlab = "Year", ylab = "Occupancy prob.",
          xlim = c(0, nyears+1), ylim = c(0,1), lwd = 2, frame.plot = FALSE, las = 1,
          main = paste('Observed occupancy (', nspecies, ' species, ',
              nsites, ' sites)', sep = ''))
    }, silent = TRUE)
    if(inherits(tryPlot, "try-error"))
      tryPlotError(tryPlot)
  }

  # Return data
  return(list(nspecies = nspecies, nsites = nsites, nsurveys = nsurveys,
      nyears = nyears, mean.psi1 = mean.psi1, sig.lpsi1 = sig.lpsi1,
      mu.beta.lpsi1 = mu.beta.lpsi1, sig.beta.lpsi1 = sig.beta.lpsi1,
      range.mean.phi = range.mean.phi, sig.lphi = sig.lphi,
      mu.beta.lphi = mu.beta.lphi, sig.beta.lphi = sig.beta.lphi,
      range.mean.gamma = range.mean.gamma, sig.lgamma = sig.lgamma,
      mu.beta.lgamma = mu.beta.lgamma, sig.beta.lgamma = sig.beta.lgamma,
      range.mean.p = range.mean.p, sig.lp = sig.lp,
      mu.beta.lp = mu.beta.lp, sig.beta.lp = sig.beta.lp,
      range.beta1.survey = range.beta1.survey, range.beta2.survey = range.beta2.survey,
      trend.sd.site = trend.sd.site, trend.sd.survey = trend.sd.survey,
      Xpsi1 = Xpsi1, Xphi = Xphi, Xgamma = Xgamma, Xp = Xp,
      beta0.lpsi = beta0.lpsi, beta1.lpsi = beta1.lpsi, psi = psi,
      mean.phi = mean.phi, mean.gamma = mean.gamma,
      eps.lphi = eps.lphi, eps.lgamma = eps.lgamma,
      beta0.lphi = beta0.lphi, beta0.lgamma = beta0.lgamma,
      beta1.lphi = beta1.lphi, beta1.lgamma = beta1.lgamma,
      phi = phi, gamma = gamma, mean.p = mean.p, eps.lp = eps.lp,
      beta0.lp = beta0.lp, beta1.lp = beta1.lp, beta1 = beta1, beta2 = beta2,
      sd.site = sd.site, sd.survey = sd.survey, eps1 = eps1, eps2 = eps2,
      n.occ = n.occ, psi.fs = psi.fs, mean.psi = mean.psi, z.obs = z.obs,
      n.occ.obs = n.occ.obs, psi.obs = psi.obs,
      nyears.pres = nyears.pres, nspecies.pres = nspecies.pres,
      nyears.det = nyears.det, nspecies.det = nspecies.det,
      z = z, p = p, y = y))
} # ------------------ End function definition ---------------------
# ---- File: AHMbook/R/simDCM.R ----
# AHM2 section 2.5.1, originally called 'DMsim.fn'
# A function to simulate data for a Dail-Madsen model without covariates.
# Plots added 2019-08-16, see email from Marc.

# ---------- simulator function ------------------------
simDM0 <- function(nsites = 50, nsurveys = 3, nyears = 5,
    lambda = 4, gamma = 1.5, phi = 0.8, p = 0.7, show.plots=TRUE){
  ## Simulation for multiple-visit data (from pcountOpen help file)
  ## No covariates, constant time intervals between primary periods
  # nsites: Number of sites
  # nsurveys: Number of replicate (secondary) samples within period of closure
  # nyears: Number of primary samples: years, seasons etc.
  # lambda: Initial expected abundance
  # gamma, phi: recruitment and apparent survival rates, respectively
  # p: detection probability

  # Checks and fixes for input data -----------------------------
  nsites <- round(nsites[1])
  nsurveys <- round(nsurveys[1])
  nyears <- round(nyears[1])
  stopifNegative(lambda, allowZero=FALSE)
  stopifnotProbability(phi)
  stopifnotProbability(p)
  # --------------------------------------------

  y <- array(NA, dim = c(nsites, nyears, nsurveys))
  N <- matrix(NA, nsites, nyears)
  S <- R <- matrix(NA, nsites, nyears-1)
  N[,1] <- rpois(nsites, lambda)          # Initial state
  for(t in 1:(nyears-1)) {                # State dynamics
    S[,t] <- rbinom(nsites, N[,t], phi)   # Number of survivors
    R[,t] <- rpois(nsites, gamma)         # Number of recruits
    N[,t+1] <- S[,t] + R[,t]              # Number in population next year
  }
  for(j in 1:nsurveys){                   # Observation process
    y[,,j] <- rbinom(nsites*nyears, N, p)
  }

  # Put observed data into two dimensions
  yy <- array(NA, dim = c(nsites, nsurveys*nyears))
  for(t in 1:nyears){
    yy[,(nsurveys * t-(nsurveys-1)):(nsurveys*t)] <- y[,t,]
  }

  if(show.plots) {
    op <- par(mfrow = c(2,2), mar = c(5,5,4,3), cex.lab = 1.5, cex.axis = 1.5)
    on.exit(par(op))
    matplot(t(N), type = 'l',
        main = paste('Population trajectories under a simple DM model \nwith lambda =',
            lambda, ', phi =', phi, 'and gamma =', gamma, ''),
        lty = 1, lwd = 3, las = 1, frame = FALSE, xlab = 'Year', ylab = 'N')
    matplot(t(S), type = 'l', main = 'Number of apparent survivors',
        lty = 1, lwd = 3, las = 1, frame = FALSE, xlab = 'Year', ylab = 'S')
    hist(N[,1], main = 'Distribution of N in first year', breaks = 50, col = 'grey')
    hist(N[,nyears], main = 'Distribution of N in last year', breaks = 50, col = 'grey')
  }

  return(list(
    # -------------- arguments input -------------------
    nsites = nsites, nsurveys = nsurveys, nyears = nyears,
    lambda = lambda, gamma = gamma, phi = phi, p = p,
    # ----------- values generated -------------------------
    N = N,          # true number of individuals, nsites x nyears
    S = S, R = R,   # number of survivors, recruits, nsites x (nyears-1)
    y = y,          # number detected, nsites x nyears x nsurveys
    yy = yy))       # number detected as a 2D matrix, nsites x (nyears*nsurveys)
} # -------------------- end function ----------------------------
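# Example use (illustrative sketch):
if(FALSE) {
  str(dat <- simDM0())
  dat <- simDM0(nsites = 100, nyears = 10, gamma = 2, show.plots = FALSE)
  colMeans(dat$N)                  # mean true abundance per year
}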
# ---- File: AHMbook/R/simDM0_AHM2_2-5-1_simpleDailMadsen_model.R ----
# AHM2 section 2.5.5
# A function to simulate data for a Dail-Madsen model with covariates.

simDM <- function(nsites = 50, nsurveys = 3, nyears = 5,
    mean.lambda = 4, mean.gamma.rel = 0.5, mean.phi = 0.8, mean.p = 0.7,
    beta.lam = 1, beta.gamma = 1, beta.phi = -1, beta.p = -1, show.plots=TRUE){
  # Simulation for multiple-visit data,
  # constant time intervals between primary periods
  # nsites: Number of sites
  # nsurveys: Number of replicate (secondary) samples within period of closure
  # nyears: Number of primary samples: years, seasons etc.
  # mean.lambda: Initial expected abundance at cov.lam = 0
  # mean.gamma.rel, mean.phi: recruitment and apparent survival rates,
  #   respectively, at values of cov.gamma and cov.phi equal to 0
  # mean.p: detection probability at cov.p = 0
  # beta.X is the slope of parameter X (link transformed) on the respective covariate

  # Checks and fixes for input data -----------------------------
  nsites <- round(nsites[1])
  nsurveys <- round(nsurveys[1])
  nyears <- round(nyears[1])
  stopifNegative(mean.lambda, allowZero=FALSE)
  stopifnotProbability(mean.gamma.rel)
  stopifnotProbability(mean.phi)
  stopifnotProbability(mean.p)
  # --------------------------------------------

  y <- p <- array(NA, dim = c(nsites, nyears, nsurveys))
  N <- matrix(NA, nsites, nyears)
  S <- R <- matrix(NA, nsites, nyears-1)
  cov.lam <- runif(nsites, -1, 1)
  cov.gamma <- runif(nsites, -1, 1)
  cov.phi <- runif(nsites, -1, 1)
  cov.p <- array(runif(nsites*nyears*nsurveys, -1, 1), dim = dim(y))
  lambda <- exp(log(mean.lambda) + beta.lam * cov.lam)
  N[,1] <- rpois(nsites, lambda)            # Initial state
  phi <- plogis(qlogis(mean.phi) + beta.phi * cov.phi)
  gamma <- exp(log(mean.gamma.rel) + beta.gamma * cov.gamma)
  for(t in 1:(nyears-1)) {                  # State dynamics
    S[,t] <- rbinom(nsites, N[,t], phi)
    R[,t] <- rpois(nsites, N[,t]*gamma)     # Simulate in 'relative' mode
    N[,t+1] <- S[,t] + R[,t]
  }
  for(i in 1:nsites){                       # Observation process
    for(t in 1:nyears){
      for(j in 1:nsurveys){
        p[i,t,j] <- plogis(qlogis(mean.p) + beta.p * cov.p[i,t,j])
        y[i,t,j] <- rbinom(1, N[i,t], p[i,t,j])
      }
    }
  }

  # Put observed data into two dimensions
  yy <- ccov.p <- array(NA, dim = c(nsites, nsurveys*nyears))
  for(t in 1:nyears){
    yy[,(nsurveys * t-(nsurveys-1)):(nsurveys*t)] <- y[,t,]
    ccov.p[,(nsurveys * t-(nsurveys-1)):(nsurveys*t)] <- cov.p[,t,]
  }

  # Visualisations
  if(show.plots) {
    op <- par(mfrow = c(3, 2), mar = c(5,5,4,3), cex.lab = 1.5, cex.axis = 1.5)
    on.exit(par(op))
    tryPlot <- try( {
      matplot(t(N), type = 'l',
          main = paste('Population trajectories under a simple DM model \nwith mean lambda =',
              mean.lambda, ', mean gamma =', mean.gamma.rel,
              ' and mean phi =', mean.phi, ''),
          lty = 1, lwd = 3, las = 1, frame = FALSE, xlab = 'Year', ylab = 'N')
      matplot(t(S), type = 'l', main = 'Number of apparent survivors',
          lty = 1, lwd = 3, las = 1, frame = FALSE, xlab = 'Year', ylab = 'Survivors (S)')
      matplot(t(R), type = 'l', main = 'Number of recruits',
          lty = 1, lwd = 3, las = 1, frame = FALSE, xlab = 'Year', ylab = 'Recruits (R)')
      matplot(t(apply(p, c(1,2), mean)), type = 'l',
          main = 'Average detection probability per site and year',
          lty = 1, lwd = 3, las = 1, frame = FALSE, xlab = 'Year', ylab = 'Average p')
      hist(N[,1], main = 'Distribution of N in first year', breaks = 50, col = 'grey')
      hist(N[,nyears], main = 'Distribution of N in last year', breaks = 50, col = 'grey')
    }, silent = TRUE)
    if(inherits(tryPlot, "try-error"))
      tryPlotError(tryPlot)
  }

  # Output
  return(list(
    # -------------- arguments input -------------------
    nsites = nsites, nsurveys = nsurveys, nyears = nyears,
    mean.lambda = mean.lambda, mean.gamma.rel = mean.gamma.rel,
    mean.phi = mean.phi, mean.p = mean.p,
    beta.lam = beta.lam, beta.gamma = beta.gamma, beta.phi = beta.phi, beta.p = beta.p,
    # ----------- values generated -------------------------
    cov.lam = cov.lam, cov.gamma = cov.gamma, cov.phi = cov.phi,  # covariates
    cov.p = cov.p,    # covariate for p, nsites x nyears x nsurveys
    ccov.p = ccov.p,  # covariate for p as 2D matrix, nsites x (nyears*nsurveys)
    N = N,            # true number of individuals, nsites x nyears
    S = S, R = R,     # number of survivors, recruits, nsites x (nyears-1)
    p = p,            # probability of detection, nsites x nyears x nsurveys
    y = y,            # number detected, nsites x nyears x nsurveys
    yy = yy))         # number detected as a 2D matrix, nsites x (nyears*nsurveys)
}
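# Example use (illustrative sketch):
if(FALSE) {
  str(dat <- simDM())
  dat <- simDM(mean.gamma.rel = 0.3, beta.gamma = 0, show.plots = FALSE)
  matplot(t(dat$N), type = "l")    # population trajectories
}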
# ---- File: AHMbook/R/simDM_AHM2_2-5-5_DailMadsen_model.R ----
# Function to generate the Habitat object used in section 11.10
#   Density Surface Modeling

# X : a 2-column matrix with coordinates of _regularly_spaced_ points along the transect line
#     (X <- regpoints@coords in book)
# Ntotal : true number of individuals in the study area
# sigma.move = 0 : not used! I took it out ####
# sigma : scale parameter for the half-normal detection function
# beta1 : coefficient for the relationship between density and the habitat covariate
# nsurveys : the number of surveys to simulate
# xlim, ylim : the extent of the (rectangular) study area

simDSM <- function(X, Ntotal = 400, sigma = 0.65, beta1 = 1.0, nsurveys = 2,
    xlim = c(-0.5, 3.5), ylim = c(-0.5, 4.5), show.plots = TRUE) {

  # Create pixel coordinates for the rasterized study area
  delta <- 0.2                  # 2D bin width
  gry <- seq(ylim[1] + delta/2, ylim[2] - delta/2, delta)
  ny <- length(gry)
  grx <- seq(xlim[1] + delta/2, xlim[2] - delta/2, delta)
  nx <- length(grx)
  grx <- rep(grx, ny)
  gry <- rev(sort(rep(gry, nx)))
  gr <- cbind(grx, gry)
  nPix <- nrow(gr)

  # Create spatially correlated covariate x and plot it
  V <- exp(-e2dist(gr, gr)/1)
  x <- t(chol(V)) %*% rnorm(nrow(gr))   ### x = habitat covariate, stochastic

  # Simulate activity centre locations
  probs <- exp(beta1*x)/(sum(exp(beta1*x)))   # Activity centers selected based on habitat
  s.pix.id <- sample(1:nPix, Ntotal, prob = probs, replace=TRUE)   ### stochastic - sample ###
  N <- tabulate(s.pix.id, nbins = nPix)
  # Uniformly distributed within their pixel
  sx <- runif(Ntotal, gr[s.pix.id,1] - delta/2, gr[s.pix.id,1] + delta/2)   ### stochastic
  sy <- runif(Ntotal, gr[s.pix.id,2] - delta/2, gr[s.pix.id,2] + delta/2)   ### stochastic
  U <- cbind(sx, sy)

  ### new simulation - observation process
  # Compute the distance from each activity centre to the nearest point on the
  #   transect line, then detection probability p under a half-normal function
  parr <- numeric(Ntotal)                # same for all surveys
  y2d <- array(0, c(Ntotal, nsurveys))   # 0/1 detection matrix
  for (i in 1:Ntotal) {
    dvec <- min( sqrt((sx[i] - X[, 1])^2 + (sy[i] - X[, 2])^2) )
    loghaz <- -(1/(2*sigma*sigma)) * dvec * dvec
    parr[i] <- exp(loghaz)
    y2d[i, ] <- rbinom(nsurveys, 1, parr[i])   ### stochastic
  }

  # Find which individuals are captured at least once, REMOVE the rest
  cap <- apply(y2d, 1, sum) > 0   # TRUE if captured at least once
  nind <- sum(cap)                # number captured at least once
  y2d <- y2d[cap, , drop=FALSE]
  Ucap <- U[cap, ]                # matrix with AC coords
  gid <- s.pix.id[cap]            # pixel IDs for ACs

  # Generate pixel matrix with nsurveys columns, with NA if not captured
  pixel <- matrix(gid, nrow=nind, ncol=nsurveys)
  pixel[y2d == 0] <- NA
  # For back-compatibility we need to reverse the order and add colnames
  pixel <- pixel[nind:1, , drop=FALSE]
  colnames(pixel) <- paste0("gid", 1:nsurveys)
  # End of data preparation

  # Plot with activity centers and linking lines
  # Fig. 11-15
  if(show.plots) {
    oldpar <- par(mar = c(3,3,3,6)) ; on.exit(par(oldpar))
    tryPlot <- try( {
      image(r <- rasterFromXYZ(cbind(gr, x)), col = topo.colors(10))
      image_scale(x, col = topo.colors(10))
      lines(X, col = "black", pch = 20, lwd = 3)
      points(sx, sy, pch = 16, col = "black", lwd=1)   # all activity centres (black)
      points(Ucap, pch = 20, col = "red")              # ACs of detected individuals (red)
      # Add lines from detected ACs to nearest point on transect
      dd <- e2dist(X, Ucap)
      closest <- X[apply(dd, 2, which.min), ]
      segments(x0=Ucap[,1], y0=Ucap[,2], x1=closest[,1], y1=closest[,2])
    }, silent = TRUE)
    if(inherits(tryPlot, "try-error"))
      tryPlotError(tryPlot)
  }

  return(list(
    # ............... arguments input ..........................
    X=X, Ntotal=Ntotal, sigma=sigma, beta1=beta1, nsurveys=nsurveys, xlim=xlim, ylim=ylim,
    # ............... generated values ..........................
    Habitat=as.vector(x),   # a vector for the habitat covariate for each pixel
    Habgrid=gr,             # a 2-column matrix with the coordinates of each pixel
    nPix=nPix,              # the number of pixels in the study area
    N = N,                  # true number of individuals per pixel
    U = U,                  # locations of each individual in the population
    Ucap = Ucap,            # locations of each individual detected at least once
    nind=nind,              # the number of individuals detected at least once
    pixel=pixel))           # a matrix with a column for each survey and a row
      # for each individual detected at least once, with the pixel ID for the
      # activity centre or NA if the individual was not detected on the survey
}
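# --- Illustrative usage (a minimal sketch inside if(FALSE), so nothing runs when
# --- the package is sourced; 'Xline' is a made-up transect of regularly spaced
# --- points, standing in for the 'regpoints@coords' object used in the book):
if(FALSE) {
  set.seed(1)
  Xline <- cbind(seq(0.5, 2.5, length.out = 21), seq(0, 4, length.out = 21))
  tmp <- simDSM(X = Xline, Ntotal = 400, sigma = 0.65, nsurveys = 2, show.plots = FALSE)
  tmp$nind          # number of individuals detected at least once
  head(tmp$pixel)   # pixel IDs per survey, NA where the individual was missed
}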
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/simDSM.R
# A function to simulate data under a (thinned) inhomogeneous Poisson point process (IPP)
# Code adapted from Dorazio (GEB, 2014) and Koshkina et al. (MEE, 2017)
#   by Marc then decorticated by Mike.
# Like the original D-K code, this version only allows one individual per pixel.
# AHM2 chapter 10

# Helper function to generate bivariate normal covariate surfaces; not exported:
# getCovSurface <- function(mWt=c(0.75, 0.4), sWt=c(0.25, 0.5), rho=0.5,
#     xmin=-1, xmax=1, ymin=-1, ymax=1, loc) {
#   mu.x <- xmin + mWt[1]*(xmax-xmin)
#   mu.y <- ymin + mWt[2]*(ymax-ymin)
#   sigma.x <- sWt[1]*abs(xmax-xmin)
#   sigma.y <- sWt[2]*abs(ymax-ymin)
#   rho1.xy <- 0.5
#   mu <- c(mu.x, mu.y)
#   Sigma <- matrix(c(sigma.x^2,
#       rep(rho*sigma.x*sigma.y, 2),
#       sigma.y^2), ncol=2)
#   mvtnorm::dmvnorm(loc, mean=mu, sigma=Sigma)
# }
# ----------------------------------------

# ---------------- Start of function definition --------------------
simDataDK1 <- function(sqrt.npix = 100, alpha = c(-1,-1), beta = c(6,0.5),
    drop.out.prop.pb = 0.7, quadrat.size = 4, gamma = c(0,-1.5),
    nquadrats = 250, nsurveys = 3, show.plot = TRUE){
  #
  # Function generates data for use with the integrated model described by Dorazio (GEB, 2014).
  # The function is based on the code written by Dorazio and adapted by Koshkina et al. (MEE, 2017).
  #
  # A Poisson point pattern (PPP) with intensity a function of a covariate X and intercept and
  #   coefficient beta is simulated on a discrete (pixel-based) approximation of a
  #   continuous landscape.
  # This PPP is then thinned with a pixel-wise thinning probability and with a
  #   landscape-wise drop.out.prop.pb to produce a first data set of presence-only kind.
  # A second data set is simulated by imagining replicated point counts in a total of
  #   nquadrats quadrats among all square partitions of the landscape by quadrat.size.

  # Simulate data under a (thinned) Poisson point pattern (PPP).
  # sqrt.npix: number of pixels along each side of square state space (the 'landscape');
  #   the number of pixels is then sqrt.npix^2
  # intensity of IPP: log(lambda) = beta0 + beta1 * covariate X
  # sampling detection bias in presence-only observations of IPP is modelled as:
  #   logit(b) = alpha0 + alpha1 * covariate W
  # quadrat.size: length of the side of quadrats in *pixel* units by which the PPP is
  #   summarized for conducting replicate counts or site-occ surveys
  # detection probability for the counts is governed by gamma0 and gamma1
  #   (on the cell-averaged values of W)
  # Drop-out proportion is the proportion of PO points at the end that are discarded
  #   (perhaps because these sites are not visited at all?)
  # nsurveys: number of replicated surveys in the count survey

  # This is a 100x smaller version of the simulation field compared to Koshkina et al.
  # The grid is only 100 x 100 = 10k (instead of 1M).
  # For the purpose of getting count surveys, we aggregate the original landscape by
  #   quadrats of size 16, which will yield 10k / 16 = 625 cells of 4 by 4 units side length.
  # Of these, we will keep a random sample of 'nquadrats' quadrats for the count surveys.
  # Note that the parameters beta must be chosen so as to avoid too high a 'filling'
  #   of the discrete approximation of the entire field B.
  # ------------------------------------------------------

  # -------------- Check and fix input -----------------------
  sqrt.npix <- round(sqrt.npix[1])
  stopifnotLength(alpha, 2)
  stopifnotLength(beta, 2)
  stopifnotProbability(drop.out.prop.pb)
  quadrat.size <- round(quadrat.size[1])
  if(sqrt.npix %% quadrat.size != 0)
    stop("sqrt.npix / quadrat.size must return an integer.", call.=FALSE)
  stopifnotLength(gamma, 2)
  nquadrats <- round(nquadrats[1])
  if(nquadrats > (sqrt.npix / quadrat.size)^2)
    stop("Number of quadrats to sample exceeds number of quadrats in the landscape.", call. = FALSE)
  nsurveys <- round(nsurveys[1])
  # ------------------------------------------------------------

  # Define landscape as a rectangular region S, with x and y ranging
  #   from -1 to +1, hence:
  s.area <- 4

  # Approximate the landscape by many small pixels in a raster object
  s <- temp <- raster(ncol=sqrt.npix, nrow=sqrt.npix, xmn=-1, xmx=1, ymn=-1, ymx=1, crs=NULL)
  # s will become a Raster Stack, temp is a template to create new layers
  s.loc <- xyFromCell(temp, 1:ncell(s))   # Coordinates of every pixel in S

  # PART A. Ecological process - where are the animals?
  # ---------------------------------------------------
  # Compute covariate X as a weighted sum of two bivariate normal density surfaces
  xcov <- 0.4 * getCovSurface(mWt=c(0.75,0.4), sWt=c(0.25, 0.5), rho=0.5, loc=s.loc) +
          0.6 * getCovSurface(mWt=c(0.15,0.8), sWt=c(0.5, 0.25), rho=-0.4, loc=s.loc)
  xcov <- standardize(xcov)   # Standardize covariate x

  # Fill covariate x into the raster s
  values(s) <- xcov
  names(s) <- 'x'

  # Calculate lambda as a function of X and add to Raster Stack
  X <- cbind(1, xcov)
  values(temp) <- exp(X %*% beta)   # calculate lambda
  names(temp) <- 'lambda'
  s <- addLayer(s, temp)            # add to raster stack

  # We use rejection sampling to get random draws from the IPP. The proposal
  #   distribution is uniform, ie, HPP:
  # How 'high' should the proposal density be? It must exceed the IPP everywhere.
  ( maxlambda <- max(values(s)[,'lambda']) )   # ~ 900 per unit area
  (N.hpp <- suppressWarnings(rpois(1, maxlambda*s.area)))   # Number we need to draw
  if(is.na(N.hpp) || N.hpp >= ncell(s))
    stop("The 'beta' settings result in intensities that are too high\nin the most intense region (more animals than pixels).", call.=FALSE)
  # Draw from the proposal distribution
  ind.hpp <- sample(1:ncell(s), size = N.hpp, replace = FALSE)
    # sampling w/o replacement ensures only 1 individual per pixel
  # Reject draws depending on lambda (and hence X) to get the IPP draws
  lambda.hpp <- values(s)[,'lambda'][ind.hpp]   # intensities at those pixels
  ind.ipp <- rbinom(N.hpp, 1, lambda.hpp/maxlambda)
    # use intensity lambda to determine whether to accept or reject
  (N.ipp <- sum(ind.ipp))   # about 1800 individuals in IPP
  pixel.id.ipp <- ind.hpp[ind.ipp == 1]
    # Gives id of every pixel in the landscape that has an individual,
    #   a vector of numbers, length N.ipp = total population

  # PART B. The detection-only observation model
  # --------------------------------------------
  # Create covariate W which affects which animals are detected (ie, where people look)
  wcov <- getCovSurface(mWt=c(0.25,0.65), sWt=c(0.25, 0.5), rho=0.1, loc=s.loc)
  wcov <- standardize(wcov)   # Standardize covariate w
  # Add to the raster stack
  values(temp) <- wcov
  names(temp) <- 'w'
  s <- addLayer(s, temp)

  # Compute value of thinning parameter b (= probability of detection)
  #   for each cell as a function of alpha and covariate W
  W <- cbind(1, wcov)                   # Design matrix W for thinning
  values(temp) <- plogis(W %*% alpha)   # thinning coefs
  names(temp) <- 'pTrue'
  s <- addLayer(s, temp)

  # ... simulate presence-only data (= detections of individuals as a thinned point process)
  pTrue.ipp <- values(s)[,'pTrue'][pixel.id.ipp]
  y.ipp1 <- rbinom(N.ipp, size=1, prob=pTrue.ipp)   # 1 = detected, 0 = not detected
    # A 1/0 vector, length N.ipp
  pixel.id.det1 <- pixel.id.ipp[y.ipp1 == 1]
  # length(pixel.id.det1)   # ~ 550. This is too many, better drop a bunch
  drop.out <- runif(length(pixel.id.det1), 0, 1) < drop.out.prop.pb   # T/F vector
  pixel.id.det <- pixel.id.det1[!drop.out]
  # length(pixel.id.det)    # ~ 180, ok
  y.point <- numeric(sqrt.npix^2)
  y.point[pixel.id.det] <- 1

  #### Part C: simulate replicate count data
  ## ---------------------------------------
  # Get the info on animal location into our Raster Stack as a layer:
  spop <- dropLayer(s, c('lambda','pTrue'))   # clean up, just keep covars
  z <- rep(0, ncell(spop))
  z[pixel.id.ipp] <- 1
  values(temp) <- z
  names(temp) <- 'presence'   # 1 if animal in pixel, 0 otherwise (max 1 animal per pixel)
  spop <- addLayer(spop, temp)

  # Form quadrats of side quadrat.size (default 4 -> area 16 -> 625 quadrats)
  quadfact <- c(quadrat.size, quadrat.size)
  squad <- raster::aggregate(spop, fact=quadfact, fun=mean)
    # mean is ok for x and w, but for 'presence' we need the sum
  abund <- raster::aggregate(raster::subset(spop, 'presence'), fact=quadfact, fun=sum)
  names(abund) <- 'N'
  squad <- addLayer(squad, abund)
  squad <- dropLayer(squad, 'presence')   # clean up, N/16 not useful

  # Simulate replicate counts at every quadrat (aka "site")
  nsite <- ncell(squad)       # number of sites/quadrats in count design
  N <- values(squad)[,'N']    # Extract latent abundance at each site

  # Compute values of detection probability for each 16-cell pixel in count survey
  # (Here we use the same covar, w, as the detection-only model, as do Koshkina et al.,
  #   but a different covar could be generated.)
  pcount <- plogis(gamma[1] + gamma[2] * values(squad)[,'w'])

  # Do the nsurveys surveys
  counts <- array(NA, dim = c(nsite, nsurveys))
  for(j in 1:nsurveys){
    counts[,j] <- rbinom(nsite, N, pcount)
  }
  fullCountData <- cbind(quadID=1:nsite, values(squad), counts)
  # Draw random sample of 'nquadrats' quadrats from the total number, nsite
  selQuad <- sort(sample(1:nsite, nquadrats, replace = FALSE))
  countData <- fullCountData[selQuad,]

  # Output (visual): shown only when show.plot = TRUE
  if(show.plot) {
    oldpar <- par(mfrow=1:2, mar = c(2,1,6,3))
    oldAsk <- devAskNewPage(ask = dev.interactive(orNone = TRUE))
    on.exit({par(oldpar) ; devAskNewPage(oldAsk)})

    # Fig. 1
    loc.ipp <- s.loc[pixel.id.ipp, ]
    raster::plot(raster::subset(s, 'x'), axes = FALSE, box = FALSE, asp=1,
        main = paste("Inhomogeneous Poisson point process:\nIntensity covariate 'x' and\nlocations of", N.ipp, "individuals"))
    points(loc.ipp, pch = 16, cex = 0.5)   # location of the individuals
    loc.det <- s.loc[pixel.id.det, ]
    N.det <- length(pixel.id.det)
    raster::plot(raster::subset(s, 'w'), axes = FALSE, box = FALSE, asp=1,
        main = paste("Presence-only observations:\nDetection bias covariate 'w' and\nlocations of", N.det, "individuals detected"))
    points(loc.det, pch = 16, cex = 0.5)   # location of the individuals detected

    # Fig. 2
    par(mfrow=c(2,2), mar = c(1,1,5,3))
    raster::plot(raster::subset(squad, 'x'), axes = FALSE, box = FALSE, asp=1,
        main = "Mean intensity covariate 'x'\nfor each quadrat")
    raster::plot(raster::subset(squad, 'w'), axes = FALSE, box = FALSE, asp=1,
        main = "Mean detection covariate 'w'\nfor each quadrat")
    raster::plot(raster::subset(squad, 'N'), axes = FALSE, box = FALSE, asp=1,
        main = "True abundance 'N'\nfor each quadrat")
    mnc <- rowMeans(counts)
    mnc[-selQuad] <- NA
    cnt <- raster::subset(squad, 'N')
    values(cnt) <- mnc
    raster::plot(cnt, colNA='darkgrey', axes = FALSE, box = FALSE, asp=1,
        main = "Mean counts for \neach quadrat surveyed,\ngrey if unsurveyed")
  }   # end show.plot

  # Output (numeric)
  return(list(
    # ---------------- input arguments ------------------
    sqrt.npix = sqrt.npix, alpha = alpha, beta = beta, gamma = gamma,
    drop.out.prop.pb = drop.out.prop.pb, quadrat.size = quadrat.size,
    nquadrats = nquadrats, nsurveys = nsurveys,
    # ----------------- values generated -------------------
    npix = sqrt.npix^2,       # Number of pixels in the landscape
    s.area = s.area,          # Area of the landscape, 4
    s.loc = s.loc,            # Coordinates of every pixel in the landscape
    xcov = xcov,              # 'x' (intensity) covariate
    wcov = wcov,              # 'w' (detection) covariate
    N.ipp = N.ipp,            # True number of individuals in the landscape
    pixel.id.ipp = pixel.id.ipp,      # Pixel ID for each individual in the population
    loc.ipp = s.loc[pixel.id.ipp, ],  # Coordinates for each individual in the population
    pTrue.ipp = pTrue.ipp,    # Probability of detection for each individual
    pixel.id.det = pixel.id.det,      # Pixel ID for each individual detected
    N.det = length(pixel.id.det),     # Number of detections
    loc.det = s.loc[pixel.id.det, ],  # Coordinates for each individual detected
    pcount = pcount,          # Probability of detection in each quadrat
    fullCountData = fullCountData,    # matrix with a row for each quadrat; columns for
      # quadrat ID, mean x and w covariates, true N, and the nsurveys replicate counts
    countData = countData,    # as above, but with rows only for the quadrats sampled
    s = s,                    # RasterStack with layers x, lambda, w and pTrue for all pixels
    squad = squad))           # RasterStack with layers x, w and N for the quadrats
}
# ---------------- End of function definition --------------------
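# --- Illustrative usage (a minimal sketch inside if(FALSE); all arguments at their
# --- defaults except plotting). The quadrat-level abundances must sum to N.ipp,
# --- because this version allows at most one individual per pixel:
if(FALSE) {
  set.seed(123)
  dat <- simDataDK1(show.plot = FALSE)
  dat$N.ipp                       # true number of individuals in the landscape
  sum(dat$fullCountData[, 'N'])   # same total, summed over all quadrats
  dim(dat$countData)              # nquadrats rows: quadrats actually surveyed
}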
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/simDataDK1_21--.R
# A function to simulate data under a (thinned) inhomogeneous Poisson point process (IPP)
# Code adapted from Dorazio (GEB, 2014) and Koshkina et al. (MEE, 2017)
#   by Marc then decorticated by Mike.
# The original D-K code only allows one individual per pixel, which works if you have
#   many more pixels than animals, but is not necessary. This version allows more than
#   one animal per pixel.
# AHM2 chapter 10

# Helper function to generate bivariate normal covariate surfaces; not exported:
getCovSurface <- function(mWt=c(0.75, 0.4), sWt=c(0.25, 0.5), rho=0.5,
    xmin=-1, xmax=1, ymin=-1, ymax=1, loc) {
  mu.x <- xmin + mWt[1]*(xmax-xmin)
  mu.y <- ymin + mWt[2]*(ymax-ymin)
  sigma.x <- sWt[1]*abs(xmax-xmin)
  sigma.y <- sWt[2]*abs(ymax-ymin)
  rho1.xy <- 0.5   # (unused)
  mu <- c(mu.x, mu.y)
  Sigma <- matrix(c(sigma.x^2,
      rep(rho*sigma.x*sigma.y, 2),
      sigma.y^2), ncol=2)
  mvtnorm::dmvnorm(loc, mean=mu, sigma=Sigma)
}
# ----------------------------------------

# ---------------- Start of function definition --------------------
simDataDK <- function(sqrt.npix = 100, alpha = c(-1,-1), beta = c(6,0.5),
    drop.out.prop.pb = 0.7, quadrat.size = 4, gamma = c(0,-1.5),
    nquadrats = 250, nsurveys = 3, show.plot = TRUE){
  #
  # Function generates data for use with the integrated model described by Dorazio (GEB, 2014).
  # The function is based on the code written by Dorazio and adapted by Koshkina et al. (MEE, 2017).
  #
  # A Poisson point pattern (PPP) with intensity a function of a covariate X and intercept and
  #   coefficient beta is simulated on a discrete (pixel-based) approximation of a
  #   continuous landscape.
  # This PPP is then thinned with a pixel-wise thinning probability and with a
  #   landscape-wise drop.out.prop.pb to produce a first data set of presence-only kind.
  # A second data set is simulated by imagining replicated point counts in a total of
  #   nquadrats quadrats among all square partitions of the landscape by quadrat.size.

  # Simulate data under a (thinned) Poisson point pattern (PPP).
  # sqrt.npix: number of pixels along each side of square state space (the 'landscape');
  #   the number of pixels is then sqrt.npix^2
  # intensity of IPP: log(lambda) = beta0 + beta1 * covariate X
  # sampling detection bias in presence-only observations of IPP is modelled as:
  #   logit(b) = alpha0 + alpha1 * covariate W
  # quadrat.size: length of the side of quadrats in *pixel* units by which the PPP is
  #   summarized for conducting replicate counts or site-occ surveys
  # detection probability for the counts is governed by gamma0 and gamma1
  #   (on the cell-averaged values of W)
  # Drop-out proportion is the proportion of PO points at the end that are discarded
  #   (perhaps because these sites are not visited at all?)
  # nsurveys: number of replicated surveys in the count survey

  # This is a 100x smaller version of the simulation field compared to Koshkina et al.
  # The grid is only 100 x 100 = 10k (instead of 1M).
  # For the purpose of getting count surveys, we aggregate the original landscape by
  #   quadrats of size 16, which will yield 10k / 16 = 625 cells of 4 by 4 units side length.
  # Of these, we will keep a random sample of 'nquadrats' quadrats for the count surveys.
  # Note that the parameters beta must be chosen so as to avoid too high a 'filling'
  #   of the discrete approximation of the entire field B.
  # ------------------------------------------------------

  # -------------- Check and fix input -----------------------
  sqrt.npix <- round(sqrt.npix[1])
  stopifnotLength(alpha, 2)
  stopifnotLength(beta, 2)
  stopifnotProbability(drop.out.prop.pb)
  quadrat.size <- round(quadrat.size[1])
  if(sqrt.npix %% quadrat.size != 0)
    stop("sqrt.npix / quadrat.size must return an integer.", call.=FALSE)
  stopifnotLength(gamma, 2)
  nquadrats <- round(nquadrats[1])
  if(nquadrats > (sqrt.npix / quadrat.size)^2)
    stop("Number of quadrats to sample exceeds number of quadrats in the landscape.", call. = FALSE)
  nsurveys <- round(nsurveys[1])
  # ------------------------------------------------------------

  # Define landscape as a rectangular region S, with x and y ranging
  #   from -1 to +1, hence:
  s.area <- 4

  # Approximate the landscape by many small pixels in a raster object
  s <- temp <- raster(ncol=sqrt.npix, nrow=sqrt.npix, xmn=-1, xmx=1, ymn=-1, ymx=1, crs=NULL)
  # s will become a Raster Stack, temp is a template to create new layers
  s.loc <- xyFromCell(temp, 1:ncell(s))   # Coordinates of every pixel in S

  # PART A. Ecological process - where are the animals?
  # ---------------------------------------------------
  # Compute covariate X as a weighted sum of two bivariate normal density surfaces
  xcov <- 0.4 * getCovSurface(mWt=c(0.75,0.4), sWt=c(0.25, 0.5), rho=0.5, loc=s.loc) +
          0.6 * getCovSurface(mWt=c(0.15,0.8), sWt=c(0.5, 0.25), rho=-0.4, loc=s.loc)
  xcov <- standardize(xcov)   # Standardize covariate x

  # Fill covariate x into the raster s
  values(s) <- xcov
  names(s) <- 'x'

  # Calculate log(lambda) as a function of X
  X <- cbind(1, xcov)
  loglam <- X %*% beta   # calculate log lambda

  # We use rejection sampling to get random draws from the IPP. The proposal
  #   distribution is uniform.
  # The proposal density must exceed the IPP everywhere.
  maxloglam <- max(loglam)
  (N.prop <- suppressWarnings(rpois(1, exp(maxloglam)*s.area)))   # Number we need to draw
  if(is.na(N.prop))
    stop("The 'beta' settings result in intensities that are too high\nin the most intense region.", call.=FALSE)
  # Draw from the proposal distribution
  ind.prop <- sample.int(sqrt.npix^2, size = N.prop, replace = TRUE)
    ### In this version, replace=TRUE -> more than one individual per pixel. ###
  # Reject draws depending on lambda (and hence X) to get the IPP draws
  loglam.prop <- loglam[ind.prop]   # intensities at those pixels
  retain <- rbinom(N.prop, 1, exp(loglam.prop - maxloglam))
    # use intensity lambda to determine whether to accept or reject
  (N.pop <- sum(retain))   # about 1800 individuals in IPP
  pixel.id.pop <- ind.prop[retain == 1]   # Gives pixel id for every individual.
  loc.pop <- s.loc[pixel.id.pop,] +
      matrix(runif(N.pop*2, -1/sqrt.npix, 1/sqrt.npix), ncol=2)

  # How many in each pixel?
  n <- tabulate(pixel.id.pop, nbins=sqrt.npix^2)
  # Add to the raster stack
  values(temp) <- n
  names(temp) <- 'n'
  s <- addLayer(s, temp)

  # PART B. The detection-only observation model
  # --------------------------------------------
  # Create covariate W which affects which animals are detected (ie, where people look)
  wcov <- getCovSurface(mWt=c(0.25,0.65), sWt=c(0.25, 0.5), rho=0.1, loc=s.loc)
  wcov <- standardize(wcov)   # Standardize covariate w
  # Add to the raster stack
  values(temp) <- wcov
  names(temp) <- 'w'
  s <- addLayer(s, temp)

  # Compute value of thinning parameter b (= probability of detection)
  #   for each cell as a function of alpha and covariate W
  W <- cbind(1, wcov)            # Design matrix W for thinning
  pTrue <- plogis(W %*% alpha)   # thinning coefs

  # ... simulate presence-only data (= detections of individuals as a thinned point process)
  pTrue.pop <- pTrue[pixel.id.pop]
  y.det0 <- rbinom(N.pop, size=1, prob=pTrue.pop)   # 1 = detected, 0 = not detected
  pixel.id.det0 <- pixel.id.pop[y.det0 == 1]
  # length(pixel.id.det0)   # ~ 550. This is too many, better drop a bunch
  drop.out <- runif(length(pixel.id.det0), 0, 1) < drop.out.prop.pb   # T/F vector
  pixel.id.det <- pixel.id.det0[!drop.out]
  ( N.det <- length(pixel.id.det) )   # ~ 180, ok
  y.pixel <- tabulate(pixel.id.det, nbins=sqrt.npix^2)
  # range(y.pixel)
  loc.det <- loc.pop[y.det0==1, ][!drop.out, ]

  #### Part C: simulate replicate count data
  ## ---------------------------------------
  # Form quadrats of side quadrat.size (default 4 -> area 16 -> 625 quadrats)
  quadfact <- c(quadrat.size, quadrat.size)
  squad <- raster::aggregate(s, fact=quadfact, fun=mean)
    # mean is ok for x and w, but for 'n' we need the sum
  abund <- raster::aggregate(raster::subset(s, 'n'), fact=quadfact, fun=sum)
  names(abund) <- 'N'
  squad <- addLayer(squad, abund)
  squad <- dropLayer(squad, 'n')   # clean up, N/16 not useful

  # Simulate replicate counts at every quadrat (aka "site")
  nsite <- ncell(squad)       # number of sites/quadrats in count design
  N <- values(squad)[,'N']    # Extract latent abundance at each site

  # Compute values of detection probability for each 16-cell pixel in count survey
  # (Here we use the same covar, w, as the detection-only model, as do Koshkina et al.,
  #   but a different covar could be generated.)
  pcount <- plogis(gamma[1] + gamma[2] * values(squad)[,'w'])

  # Do the nsurveys counts
  counts <- array(NA, dim = c(nsite, nsurveys))
  for(j in 1:nsurveys){
    counts[,j] <- rbinom(nsite, N, pcount)
  }
  fullCountData <- cbind(quadID=1:nsite, values(squad), counts)
  # Draw random sample of 'nquadrats' quadrats from the total number, nsite
  selQuad <- sort(sample(1:nsite, nquadrats, replace = FALSE))
  countData <- fullCountData[selQuad,]

  # Output (visual): shown only when show.plot = TRUE
  if(show.plot) {
    oldpar <- par(mfrow=1:2, mar = c(2,1,6,3))
    oldAsk <- devAskNewPage(ask = dev.interactive(orNone = TRUE))
    on.exit({par(oldpar) ; devAskNewPage(oldAsk)})
    tryPlot <- try( {
      # Fig. 1
      raster::plot(raster::subset(s, 'x'), axes = FALSE, box = FALSE, asp=1,
          main = paste("Inhomogeneous Poisson point process:\nIntensity covariate 'x' and\nlocations of", N.pop, "individuals"))
      points(loc.pop, pch = 16, cex = 0.5)   # location of the individuals
      raster::plot(raster::subset(s, 'w'), axes = FALSE, box = FALSE, asp=1,
          main = paste("Presence-only observations:\nDetection bias covariate 'w' and\nlocations of", N.det, "individuals detected"))
      points(loc.det, pch = 16, cex = 0.5)   # location of the individuals detected

      # Fig. 2
      par(mfrow=c(2,2), mar = c(1,1,5,3))
      raster::plot(raster::subset(squad, 'x'), axes = FALSE, box = FALSE, asp=1,
          main = "Mean intensity covariate 'x'\nfor each quadrat")
      raster::plot(raster::subset(squad, 'w'), axes = FALSE, box = FALSE, asp=1,
          main = "Mean detection covariate 'w'\nfor each quadrat")
      raster::plot(raster::subset(squad, 'N'), axes = FALSE, box = FALSE, asp=1,
          main = "True abundance 'N'\nfor each quadrat")
      mnc <- rowMeans(counts)
      mnc[-selQuad] <- NA
      cnt <- raster::subset(squad, 'N')
      values(cnt) <- mnc
      raster::plot(cnt, colNA='darkgrey', axes = FALSE, box = FALSE, asp=1,
          main = "Mean counts for \neach quadrat surveyed,\ngrey if unsurveyed")
    }, silent = TRUE)
    if(inherits(tryPlot, "try-error"))
      tryPlotError(tryPlot)
  }   # end show.plot

  # Output (numeric)
  return(list(
    # ---------------- input arguments ------------------
    sqrt.npix = sqrt.npix, alpha = alpha, beta = beta, gamma = gamma,
    drop.out.prop.pb = drop.out.prop.pb, quadrat.size = quadrat.size,
    nquadrats = nquadrats, nsurveys = nsurveys,
    # ----------------- values generated -------------------
    npix = sqrt.npix^2,       # Number of pixels in the landscape
    s.area = s.area,          # Area of the landscape, 4
    s.loc = s.loc,            # Coordinates of every pixel in the landscape
    xcov = xcov,              # 'x' (intensity) covariate
    wcov = wcov,              # 'w' (detection) covariate
    N.ipp = N.pop,            # True number of individuals in the landscape
    pixel.id.ipp = pixel.id.pop,   # Pixel ID for each individual in the population
    loc.ipp = loc.pop,        # Coordinates for each individual in the population
    pTrue.ipp = pTrue,        # Probability of detection in each pixel
    pixel.id.det = pixel.id.det,   # Pixel ID for each individual detected
    N.det = N.det,            # Number of detections
    loc.det = loc.det,        # Coordinates for each individual detected
    pcount = pcount,          # Probability of detection in each quadrat
    fullCountData = fullCountData, # matrix with a row for each quadrat; columns for
      # quadrat ID, mean x and w covariates, true N, and the nsurveys replicate counts
    countData = countData,    # as above, but with rows only for the quadrats sampled
    s = s,                    # RasterStack with layers x, n and w for all pixels
    squad = squad))           # RasterStack with layers x, w and N for the quadrats
}
# ---------------- End of function definition --------------------
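# --- Illustrative usage (a minimal sketch inside if(FALSE)); the tabulation shows
# --- the key difference from simDataDK1: pixels here can hold several individuals:
if(FALSE) {
  set.seed(123)
  dat <- simDataDK(show.plot = FALSE)
  max(tabulate(dat$pixel.id.ipp))   # can exceed 1: several individuals per pixel
  dat$N.ipp                         # true population size
}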
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/simDataDK_AHM2_10.R
# Another function for Chapter 15 in AHM2
simDemoDynocc <- function(nsites = 100, nyears = 10, nvisits = 5, psi1 = 0.6,
    range.phi = c(0.2, 0.9), range.r = c(0, 0.4), range.p = c(0.1, 0.9),
    show.plot=TRUE) {
  #
  # Function simulates data under a variant of the demographic occupancy
  #   (or 'local survival') model of Roth & Amrhein (J. Appl. Ecol., 2010).
  # Data are simulated in an 'unconditional' manner, i.e., for each site from first to last year.
  # All parameters can be made year-dependent by specification of a range,
  #   within which annual values will be drawn from uniform distributions.
  #
  # What the function arguments mean:
  # psi1 = probability a territory is occupied at t=1
  # nsites = number of territories
  # nyears = number of study years
  # nvisits = number of replicate visits per site and year
  # range.phi = lower and upper limit of uniform distribution, from which
  #   annual local survival probability is drawn
  # range.r = lower and upper limit of uniform distribution, from which
  #   annual recruitment probability is drawn
  # range.p = lower and upper limit of uniform distribution, from which
  #   annual detection probability is drawn

  # Checks and fixes for input data -----------------------------
  nsites <- round(nsites[1])
  nyears <- round(nyears[1])
  nvisits <- round(nvisits[1])
  stopifnotProbability(psi1)
  stopifnotProbability(range.phi)   # bounds
  stopifnotProbability(range.r)     # bounds
  stopifnotProbability(range.p)     # bounds
  # ----------------------------------------------------------------

  # Define true territory occupancy state matrix z
  z <- matrix(rep(NA, nyears*nsites), ncol=nyears)
  # Define the 3-dimensional matrix y that contains the observations
  y <- array(NA, dim = c(nsites, nvisits, nyears))

  # Simulate the annual local survival (nyears - 1 intervals)
  phi <- runif(nyears-1, min(range.phi), max(range.phi))
  # Simulate the annual recruitment (nyears - 1 intervals)
  r <- runif(nyears-1, min(range.r), max(range.r))
  # Simulate the annual detection (includes year 1)
  p <- runif(nyears, min(range.p), max(range.p))

  # Simulate true state z from t=1:nyears
  persistence <- new.colonization <- z   # Provide intermediate structures
  for(i in 1:nsites) {
    # Initial year (t=1)
    z[i,1] <- rbinom(1, 1, psi1)
    for(t in 2:nyears) {
      persistence[i,t] <- z[i,t-1] * phi[t-1] + z[i,t-1] * (1-phi[t-1]) * r[t-1]
        # survival or a 'rescue process'
      new.colonization[i,t] <- (1-z[i,t-1]) * r[t-1]
      z[i,t] <- rbinom(1, 1, persistence[i,t] + new.colonization[i,t])
    }
  }

  # Observations from t=1:nyears
  for(i in 1:nsites) {
    for(t in 1:nyears) {
      for(j in 1:nvisits) {
        y[i,j,t] <- rbinom(1, 1, z[i,t] * p[t])
      }
    }
  }

  # Create vector with 'occasion of marking' (for observed data)
  obsz <- apply(y, c(1,3), max)
  f <- suppressWarnings(apply(obsz, 1, function(x) min(which(x!=0))))
  f[is.infinite(f)] <- nyears   # sites never detected get the last year

  # Derived quantities
  nocc.true <- apply(z, 2, sum)    # True ...
  nocc.obs <- apply(obsz, 2, sum)  # ... and observed number of pairs

  if(show.plot) {
    # Visualization by two graphs
    oldpar <- par(mfrow = c(1, 2), mar = c(5, 5, 4, 2), cex.lab = 1.5)
    on.exit(par(oldpar))
    tryPlot <- try( {
      plot(1, 0, type = 'n', ylim = c(0,1), frame = FALSE, xlab = "Year",
          ylab = "Probability", xlim = c(1, nyears), las = 1,
          main = 'Local survival, recruitment and detection', xaxt='n')
      axis(1, 1:nyears)
      lines(1:(nyears-1), phi, type = 'o', pch=16, lwd = 2, col = 4, lty=2)
      lines(1:(nyears-1), r, type = 'o', pch=16, lwd = 2, col = 2, lty=3)
      lines(1:nyears, p, type = 'o', pch=16, lwd = 2, col = 1)
      legend('top', c("survival", "recruitment", "detection"),
          lty=c(2,3,1), lwd=2, col=c(4,2,1),   #pch=16,
          inset=c(0, -0.05), bty='n', xpd=NA, horiz=TRUE)

      plot(1:nyears, nocc.true, type = 'n', frame = FALSE, xlab = "Year",
          ylab = "Population size", xlim = c(1, nyears), ylim = c(0, nsites), las = 1,
          main = 'True and observed population size', xaxt='n')
      axis(1, 1:nyears)
      lines(1:nyears, nocc.true, type = 'o', pch=16, lwd = 2, col = 2, lty=1)
      lines(1:nyears, nocc.obs, type = 'o', pch=16, lwd = 2, col = 4, lty=2)
      legend('top', c("true", "observed"), lty=c(1,2), lwd=2, col=c(2,4),
          inset=c(0, -0.05), bty='n', xpd=NA, horiz=TRUE)
    }, silent = TRUE)
    if(inherits(tryPlot, "try-error"))
      tryPlotError(tryPlot)
  }

  # Return stuff
  return(list(
    # ----------- arguments supplied -----------------------
    psi1 = psi1, nsites = nsites, nyears = nyears, nvisits = nvisits,
    range.phi = range.phi, range.r = range.r, range.p = range.p,
    # ----------- generated values ---------------------------
    phi = phi, r = r, p = p, z = z, y = y, f = f,
    nocc.true = nocc.true, nocc.obs = nocc.obs))
}
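# --- Illustrative usage (a minimal sketch inside if(FALSE), with all arguments
# --- at their defaults except plotting):
if(FALSE) {
  set.seed(2019)
  tmp <- simDemoDynocc(show.plot = FALSE)
  rbind(true = tmp$nocc.true, observed = tmp$nocc.obs)  # occupied territories per year
  table(tmp$f)   # year of first observed occupancy ('occasion of marking') per site
}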
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/simDemoDynocc_AHM2_4.R
# 1. Define an R function to generate dynamic presence/absence systems with 'space'
# Code to define a function for simulating data.
# AHM2 - 9.6.1.1

#### Uses 'fields' instead of 'RandomFields' ####

simDynoccSpatial <- function(side = 50, nyears = 10, nsurveys = 3,
    mean.psi1 = 0.4, beta.Xpsi1 = 0,
    range.phi = c(0.8, 0.8), beta.Xphi = 0,
    range.gamma = c(0.1, 0.1), beta.Xgamma = 0,
    range.p = c(0.4, 0.4), beta.Xp = 0,
    theta.XAC = 5000, beta.XAC = c(0, 0, 0, 0), beta.Xautolog = c(0, 0),
    trend.sd.site = c(0, 0), trend.sd.survey = c(0, 0),
    seed.XAC = NA, seed = NULL, show.plots = TRUE, ask.plot = TRUE, verbose = TRUE) {
  #
  # Written by Marc Kéry, 2014-2018
  #
  # Function to simulate detection/nondetection data in a square area
  #   under a very general dynamic site-occ model, including the
  #   following effects:
  # (1) annual variation in the probabilities of patch persistence,
  #     colonization and detection can be specified by the bounds of a
  #     uniform distribution.
  # (2) one site-, site/year-, and site/year/rep-specific covariate
  #     is allowed to affect the probabilities of occupancy
  #     (beta.Xpsi1 for site covariate), colonisation/persistence
  #     (beta.Xgamma, beta.Xphi, for yearly site covariate), and
  #     detection (beta.Xp for observational covariate), respectively.
  # (3) a single, spatially structured covariate for habitat suitability
  #     may affect all parameters via coefficient beta.XAC (for a
  #     biologically reasonable interpretation, choose coefficients with the
  #     same sign for all 4 parameters, as if mediated by underlying density).
  #     That spatial covariate is simulated as a Gaussian random field
  #     with negative exponential correlation function with
  #     'range parameter' theta.XAC.
  # (4) autologistic effects (beta.Xautolog) in persistence and colonization
  #     probability can be chosen, which fits a logistic regression of
  #     these parameters on the proportion of occupied neighbouring cells
  #     (in a queen's or 2nd-order neighbourhood) during the previous time step.
  # (5) Additional detection heterogeneity can be introduced
  #     at the site or the individual survey level, with the possibility of a
  #     temporal trend in this heterogeneity. For instance, an annual trend in
  #     detection heterogeneity at the site or the survey level is specified by
  #     the value in the first and the last year.
  #     Hence, trend.sd.site = c(0, 1) will result in a linear trend in
  #     the magnitude of site heterogeneity in detection from 0 in the
  #     first year to 1 in the last year.
  #
  #
  # Function arguments:
  # -------------------
  #
  # *** Design of study and basic 'magnitude' of parameters ***
  # side – side length of square simulation area. Therefore,
  #   the number of sites, or cells, M = side^2
  # nsurveys – Number of replicate surveys within a 'season', year or primary period
  # nyears – Number of years (or 'seasons')
  # mean.psi1 – intercept of occupancy probability in year 1
  # range.phi and range.gamma – bounds of uniform distribution from which
  #   annual intercepts for persistence (phi) and colonisation (gamma)
  #   are drawn
  # range.p – same for detection probability p
  #
  #
  # *** Covariates ***
  # beta.Xpsi1: coefficient of a site covariate in psi1
  # beta.Xphi: coefficient of a site/year covariate in phi
  # beta.Xgamma: coefficient of a site/year covariate in gamma
  # beta.Xp: coefficient of a site/year/rep covariate in p
  #
  #
  # *** Parameters governing the spatial correlations ***
  # theta.XAC: 'range parameter' of a covariate with exponential
  #   spatial correlation (i.e., a Gaussian random field is used as an
  #   environmental covariate). NOTE: if you want to set to zero the effects
  #   of this spatially autocorrelated variable, you CANNOT
  #   set theta.XAC=0 because this breaks the function,
  #   nor can you simply choose a very small value.
  #   Instead you MUST set the elements of coefficients vector beta.XAC
  #   to zero.
  # beta.XAC: vector of coefficients of that field for the 4 model params:
  #   psi1, phi, gamma, and p (in that order)
  # beta.Xautolog – vector of coefficients of autologistic covariate
  #   in the following order: persistence (phi), colonization (gamma).
  #   Autocovariate is computed at every season as the proportion of
  #   occupied cells in a queen's neighbourhood around each cell.
  #
  #
  # *** Detection heterogeneity ***
  # trend.sd.site: range of year-specific values of SD of Gaussian
  #   random site effects in p: c(1,1) specifies a constant value of 1
  #   for all years, while c(0,1) specifies a linear increase over the years
  #   from 0 to 1.
  # trend.sd.survey: range of year-specific values of standard deviation
  #   of Gaussian random survey effects in p: specification as
  #   for trend.sd.site
  #
  # *** Graphics control and other ***
  # seed – allows one to 'fix' the simulation so that it becomes reproducible
  # ask.plot – if TRUE, the user is prompted before each new page of plots
  #   (if FALSE, plots are drawn without pausing)

  if(FALSE) {x <- NULL; rm(x)}   # Stops R CMD check choking on 'curve'.

  # Checks and fixes for input data -----------------------------
  side <- round(side[1])
  nyears <- round(nyears[1])
  stopifnotGreaterthan(nyears, 1)
  nsurveys <- round(nsurveys[1])
  stopifnotProbability(mean.psi1)
  stopifnotProbability(range.phi)    # bounds
  stopifnotProbability(range.gamma)  # bounds
  stopifnotProbability(range.p)      # bounds
  stopifNegative(theta.XAC, allowZero=FALSE)
  stopifnotLength(beta.XAC, 4)
  stopifnotLength(beta.Xautolog, 2)
  stopifnotLength(trend.sd.site, 2)    # trend
  stopifNegative(trend.sd.site)
  stopifnotLength(trend.sd.survey, 2)  # trend
  stopifNegative(trend.sd.survey)
  # ----------------------------------------------------------------

  # Restore graphical settings on exit -----------------------------
  if(show.plots) {
    oldpar <- par("mfrow", "mar", "cex.main", "cex.lab", "cex.axis")
    oldAsk <- devAskNewPage(ask = ask.plot && dev.interactive(orNone=TRUE))
    on.exit({par(oldpar); devAskNewPage(oldAsk)})
  }
  # ----------------------------------------------------------------

  # Create grid
  xcoord <- 1:side
  ycoord <- 1:side
  grid <- as.matrix(expand.grid(x=xcoord, y=ycoord))
  M <- side^2   # Total number of cells or sites

  # Compute adjacency matrix for grid
  neigh <- spdep::dnearneigh(as.matrix(grid), d1 = 0, d2 = sqrt(2) * 1 + 0.01)
  winnb <- spdep::nb2WB(neigh)   # Function to get CAR ingredients for BUGS
  nneigh <- winnb$num            # number of neighbours
  amatrix <- spdep::nb2mat(neigh)
  amatrix[amatrix > 0] <- 1      # Neighbours get a 1, non-neighbours a 0

  # Set up arrays needed
  site <- 1:M        # Sites
  year <- 1:nyears   # Years
  prob <- array(dim = c(side, side))                          # p matrix
  psi <- muZ <- z <- array(dim = c(side, side, nyears))       # Occupancy, occurrence
  phi <- gamma <- array(NA, dim = c(side, side, (nyears-1)))  # Survival, colonisation
  Xauto <- array(NA, dim = c(side, side, nyears))             # Autocovariate
  y <- p <- array(NA, dim = c(side, side, nsurveys, nyears))  # Det. histories and p

  # Create values of 1 spatially autocorrelated covariate XAC
  # Generate correlated random variables in a square
  #if(requireNamespace("RandomFields", quietly=TRUE)) {
  #  RandomFields::RFoptions(seed=seed.XAC)   # Default NA; 88 gives cool pattern
  #  XAC <- matrix(RandomFields::RFsimulate(RandomFields::RMexp(var = 1, scale = theta.XAC),
  #      x=xcoord, y=ycoord, grid=TRUE)@data$variable1,
  #      ncol = side, byrow = TRUE)   # variance 1
  #  if(!is.na(seed.XAC))
  #    RandomFields::RFoptions(seed=NA)
  #} else {
    message("Using package 'fields' instead of 'RandomFields'; see help(simDynoccSpatial).")
    if(!is.na(seed.XAC))
      set.seed(seed.XAC)
    obj <- circulantEmbeddingSetup(grid=list(x=xcoord, y=ycoord),
        Covariance="Exponential", aRange=theta.XAC)
    tmp <- try(circulantEmbedding(obj), silent=TRUE)
    if(inherits(tmp, "try-error"))
      stop("Simulation of random field failed.\nTry with smaller values for 'side' or 'theta.XAC'.")
    XAC <- matrix(tmp, ncol = side, byrow = TRUE)
  #}

  set.seed(seed=seed)   # Default NULL; do this AFTER RFsimulate

  # Create four spatially unstructured covariates
  # Site covariate for psi1
  Xpsi1 <- matrix(runif(M, -2, 2), ncol = side)
  # Yearly-site covariates for phi and gamma
  Xphi <- Xgamma <- array(runif(M*nyears, -2, 2), dim = c(side, side, nyears))
  # Observational covariate for p
  Xp <- array(runif(M*nsurveys*nyears, -2, 2), dim=c(side, side, nsurveys, nyears))

  # Draw values of baseline levels of the main parameters
  #   (i.e., draw year effects if any)
  mean.phi <- runif(n = nyears-1, min = min(range.phi), max = max(range.phi))
  mean.gamma <- runif(n = nyears-1, min = min(range.gamma), max = max(range.gamma))
  mean.p <- runif(n = nyears, min = min(range.p), max = max(range.p))

  # (a) Simulate state process parameters: initial state (first year)
  psi[,,1] <- plogis(qlogis(mean.psi1) + beta.Xpsi1 * Xpsi1 + beta.XAC[1] * XAC)   # psi1

  # (b) Simulate state in first year
  z[,,1] <- rbinom(M, 1, psi[,,1])   # Initial occurrence state

  # Compute value of autocovariate after first year = proportion of neighbours occupied
  #   (first vectorize and then put into matrix again)
  Xautovec <- amatrix %*% c(z[,,1])
  Xauto[,,1] <- matrix(Xautovec/nneigh, ncol = side)   # Put back in matrix by column again

  # Do the pre-loop plots
  # ---------------------
  if(show.plots) {
    tryPlot <- try( {
      # Plot effects of autocovariate on (year-specific) phi and gamma
      par(mfrow = c(1, 2))
      curve(plogis(qlogis(mean.phi[1]) + beta.Xautolog[1] * x), 0, 1,
          main = "Persistence: \nphi ~ Year + Autocovariate",
          xlab = "Autocov. (prop. occupied neighb.)", ylab = "phi",
          ylim = c(0,1), frame = FALSE)
      for(k in 2:(nyears-1)){
        curve(plogis(qlogis(mean.phi[k]) + beta.Xautolog[1]*x), 0, 1, add=TRUE)
      }
      curve(plogis(qlogis(mean.gamma[1]) + beta.Xautolog[2] * x), 0, 1,
          main = "Colonization: \ngamma ~ Year + Autocovariate",
          xlab = "Autocovariate (prop. occupied neighb.)", ylab = "gamma",
          ylim = c(0,1), frame = FALSE)
      for(k in 2:(nyears-1)){
        curve(plogis(qlogis(mean.gamma[k]) + beta.Xautolog[2]*x), 0, 1, add=TRUE)
      }

      # Simulate true system dynamics
      par(mfrow = c(2,2), mar = c(5,4,5,2), cex.main = 1.3, cex.lab = 1.5, cex.axis = 1.2)
      # Plot random field covariate XAC
      #   (rows are in x, columns in y direction)
      image(1:side, 1:side, XAC, col=topo.colors(100),
          main = paste("Gaussian random field XAC with \n neg. exponential correlation (range =", theta.XAC, ")"),
          xlab = 'x', ylab = 'y')
      image(1:side, 1:side, psi[,,1], col=topo.colors(100),
          main = paste("Initial occupancy probability"), xlab = 'x', ylab = 'y')
      image(1:side, 1:side, z[,,1], col=c("white", "black"),
          main = paste("Initial presence/absence (true system state z):\n black = occupied, white = unoccupied"),
          xlab = 'x', ylab = 'y')
      abline(h = 0:side+0.5, v = 0:side+0.5, col = "lightgrey")
      image(1:side, 1:side, Xauto[,,1], col=topo.colors(100),
          main = "Autocovariate between year 1 and year 2", xlab = 'x', ylab = 'y')
    }, silent = TRUE)
    if(inherits(tryPlot, "try-error")) {
      show.plots <- FALSE
      tryPlotError(tryPlot)
    }
  }

  # (c) Simulate state process parameters: time steps 2:nyears
  for(k in 2:nyears){
    if(verbose)
      cat(paste("** Year", k, "**\n"))

    # Compute colonisation and extinction parameters
    phi[,,k-1] <- plogis(qlogis(mean.phi[k-1]) + beta.Xphi * Xphi[,,k-1] +
        beta.XAC[2] * XAC + beta.Xautolog[1] * Xauto[,,k-1])
    gamma[,,k-1] <- plogis(qlogis(mean.gamma[k-1]) + beta.Xgamma * Xgamma[,,k-1] +
        beta.XAC[3] * XAC + beta.Xautolog[2] * Xauto[,,k-1])

    # Compute latent states
    muZ[,,k] <- z[,,k-1]*phi[,,k-1] + (1-z[,,k-1])*gamma[,,k-1]
    z[,,k] <- rbinom(M, 1, muZ[,,k])

    # Compute autocovariate
    Xautovec <- amatrix %*% c(z[,,k])
    Xauto[,,k] <- matrix(Xautovec/nneigh, ncol = side)   # re-assemble by column

    # Do the in-loop plots
    # --------------------
    if(show.plots) {
      # (par is called here, inside the 'if', so that no graphics device is
      #   opened when show.plots = FALSE)
      par(mfrow = c(2,2), mar = c(5,4,5,2), cex.main = 1.3, cex.lab = 1.5, cex.axis = 1.2)
      tryPlot <- try( {
        image(1:side, 1:side, phi[,,k-1], col=topo.colors(100),
            main = paste("Persistence between year", k-1, "and year", k),
            xlab = 'x', ylab = 'y')
        image(1:side, 1:side, gamma[,,k-1], col=topo.colors(100),
            main = paste("Colonization between year", k-1, "and year", k),
            xlab = 'x', ylab = 'y')
        image(1:side, 1:side, z[,,k], col=c("white", "black"),
            main = paste('Presence/absence (z) in year', k, ':\n black = occupied, white = unoccupied'),
            xlab = 'x', ylab = 'y')
        abline(h = 0:side+0.5, v = 0:side+0.5, col = "lightgrey")
        image(1:side, 1:side, Xauto[,,k], col=topo.colors(100),
            main = paste("Autocovariate between year", k, "and year", k+1),
            xlab = 'x', ylab = 'y')
      }, silent = TRUE)
      if(inherits(tryPlot, "try-error")) {
        show.plots <- FALSE
        tryPlotError(tryPlot)
      }
    }
  }

  # (d) Observation process parameters
  # First choose values of annual SD of p random effects
  sd.site <- seq(from = trend.sd.site[1], to = trend.sd.site[2], length.out = nyears)
  sd.survey <- seq(from = trend.sd.survey[1], to = trend.sd.survey[2], length.out = nyears)
  for(k in 1:nyears){
    # Site random effects
    eps1 <- matrix(rnorm(n = M, mean = 0, sd = sd.site[k]), ncol = side)
    # Survey random effects
    eps2 <- rnorm(n = nsurveys, mean = 0, sd = sd.survey[k])
    for(j in 1:nsurveys){
      p[,,j,k] <- plogis(qlogis(mean.p[k]) + beta.Xp * Xp[,,j,k] +
          beta.XAC[4] * XAC + eps1[,] + eps2[j])
    }
  }

  # Simulate actual observation process (also updating entire grid in one go)
  for(k in 1:nyears){         # Loop over years
    for(j in 1:nsurveys){     # Loop over replicates
      prob <- z[,,k] * p[,,j,k]   # zero out p for unoccupied sites
      y[,,j,k] <- rbinom(M, 1, prob)
      # image(1:side, 1:side, y[,,j,k], main = paste("Year", k, "and rep", j))
      #   (Look at clumped pattern in y)
    }
  }

  # Derived quantities
  # Compute annual population occupancy
  for (k in 2:nyears){
    psi[,,k] <- psi[,,k-1]*phi[,,k-1] + (1-psi[,,k-1])*gamma[,,k-1]
  }
  mean.psi <- apply(psi, 3, mean)   # Average psi over sites

  # Compute true and observed number of occupied sites
  zobs <- apply(y, c(1,2,4), max)
  nocc <- apply(z, 3, sum)
  nocc.obs <- apply(zobs, 3, sum)

  # Do the post-loop plots
  # ----------------------
  psi.app <- apply(apply(y, c(1,2,4), max), 3, mean)
  if(show.plots) {
    tryPlot <- try( {
      # (4) More plots comparing true states and observations
      # Plot realised and apparent occupancy
      par(mfrow = c(1,1))
      plot(year, apply(z, 3, mean), type = "l", xlab = "Year",
          ylab = "Occupancy or Detection prob.", col = "red",
          xlim = c(0, nyears+1), ylim = c(0,1), lwd = 2, lty = 1,
          frame.plot = FALSE, las = 1)
      lines(year, mean.p, type = "l", col = "red", lwd = 2, lty = 2)
      lines(year, psi.app, type = "l", col = "black", lwd = 2)
      text(0.8*nyears, 0.1,
          labels = "red solid - true occupancy prob.\n red dashed - detection prob.\n black - observed proportion occupied",
          cex = 1)

      # Plots comparing true and observed latent states
      par(mfrow = c(2,2), mar = c(5,4,5,2), cex.main = 1.3, cex.lab = 1.5, cex.axis = 1.2)
      for(k in 1:nyears){
        image(1:side, 1:side, z[,,k], col=c("white", "black"),
            main = paste('Presence/absence (z) in year', k), xlab = 'x', ylab = 'y')
        abline(h = 0:side+0.5, v = 0:side+0.5, col = "lightgrey")
        image(1:side, 1:side, zobs[,,k], col=c("white", "black"),
            main = paste('Ever_detected (zobs) in year', k), xlab = 'x', ylab = 'y')
        abline(h = 0:side+0.5, v = 0:side+0.5, col = "lightgrey")
      }
    }, silent = TRUE)
    if(inherits(tryPlot, "try-error"))
      tryPlotError(tryPlot)
  }

  # Compute values of naive autocovariate (observed proportion
  #   of occupied neighbouring cells)
  Xautoobs <- array(NA, dim = dim(zobs))
  for(k in 1:nyears){       # Loop over years
    for(i1 in 1:side){      # Loop over one side (hopefully X)
      for(i2 in 1:side){    # Loop over other side (hopefully Y)
        i1.start <- max(1, (i1-1))
        i1.end <- min(side, (i1+1))
        i2.start <- max(1, (i2-1))
        i2.end <- min(side, (i2+1))
        Xautoobs[i1,i2,k] <- (sum(zobs[i1.start:i1.end, i2.start:i2.end, k]) -
            zobs[i1,i2,k]) / (length(zobs[i1.start:i1.end, i2.start:i2.end, k]) - 1)
      }
    }
  }

  out <- list(
    # ----------------- values input -----------------------
    side=side, nyears=nyears, nsurveys=nsurveys, mean.psi1=mean.psi1,
    beta.Xpsi1=beta.Xpsi1, range.phi=range.phi, beta.Xphi=beta.Xphi,
    range.gamma=range.gamma, beta.Xgamma=beta.Xgamma, range.p=range.p,
    beta.Xp=beta.Xp, theta.XAC=theta.XAC, beta.XAC=beta.XAC,
    beta.Xautolog=beta.Xautolog, trend.sd.site=trend.sd.site,
    trend.sd.survey=trend.sd.survey, seed=seed, seed.XAC=seed.XAC,
    # ----------------- values generated --------------------
    M=M,                  # total number of pixels in the study area
    grid=grid,            # 2-column matrix, the x and y coordinates of the pixels
    amatrix = amatrix,    # MxM matrix, [i,j] = 1 if cells i and j are neighbours, 0 otherwise
    Xpsi1 = Xpsi1,        # side x side matrix, value of covariate affecting initial occupancy (psi1)
    Xphi = Xphi,          # side x side x nyears array, value of covariate affecting persistence (phi)
    Xgamma = Xgamma,      # side x side x nyears array, value of covariate affecting colonisation (gamma)
    Xp = Xp,              # side x side x nsurveys x nyears array, value of covariate affecting detection (p)
    XAC=XAC,              # side x side matrix, the spatially correlated covariate
    Xauto = Xauto,        # side x side x nyears array, autocovariate, proportion of neighbouring cells occupied
    Xautoobs = Xautoobs,  # side x side x nyears array, observed autocovariate, proportion of
      # neighbouring cells where the species was detected
    sd.site=sd.site,      # vector nyears, year-specific values of SD of Gaussian random site effects in p
    sd.survey=sd.survey,  # vector nyears, year-specific values of SD of Gaussian random survey effects in p
    mean.phi=mean.phi,    # vector nyears-1, year-specific intercept of persistence on probability scale
    mean.gamma=mean.gamma,# vector nyears-1, year-specific intercept of colonisation on probability scale
    mean.p=mean.p,        # vector nyears, year-specific intercept of detection probability on probability scale
    psi=psi,              # side x side x nyears array, probability of occupancy of each cell
    mean.psi=mean.psi,    # vector nyears, mean occupancy over all cells
    psi.app=psi.app,      # vector nyears, apparent occupancy, proportion of cells where species detected
    z=z,                  # side x side x nyears array, true occupancy status of each cell in each year (1 if occupied)
    zobs=zobs,            # side x side x nyears array, observed occupancy status of each cell in each year (1 if detected)
    nocc = nocc,          # vector nyears, the true number of cells occupied each year
    nocc.obs = nocc.obs,  # vector nyears, the number of cells where the species was detected each year
    phi=phi,              # side x side x nyears-1 array, probability of persistence in each interval between years
    gamma=gamma,          # side x side x nyears-1 array, probability of colonisation in each interval between years
    p=p,                  # side x side x nsurveys x nyears array, probability of detection
    y = y)                # side x side x nsurveys x nyears array, detection history, 1 if species detected

  # Add an unmarked data frame object
  out$umf <- conv2UM(out)

  return(out)
}
# ------------------------------------------------------------------

# Helper function that turns the simulated data into an unmarked data frame
conv2UM <- function(d){
  # Function creates an unmarked data frame for model-fitting function colext
  #   (= dynamic occupancy model) using output of the data simulation function.
  #
  # The function works for up to 9999 years = 'seasons' or 'primary periods'.
  #
  # Marc Kery, 12 Dec 2014; mangled by Mike Meredith, 17 May 2019
  #
  # Function argument:
  # d: output object of the data simulation function

  # Row names to be used for everything:
  names <- paste(d$grid[,1], d$grid[,2], sep = '.')
  yy <- matrix(d$y, d$M, d$nsurveys * d$nyears)
  Xp <- matrix(d$Xp, d$M, d$nsurveys * d$nyears)
  rownames(yy) <- rownames(Xp) <- names

  siteCovs <- data.frame(Xpsi1 = c(d$Xpsi1), XAC = c(d$XAC))
  rownames(siteCovs) <- names

  if(d$nyears < 100) {
    yrChar <- sprintf("%02i", 1:d$nyears)
  } else {
    yrChar <- sprintf("%04i", 1:d$nyears)
  }
  yearlySiteCovs <- list(
    year = matrix(yrChar, d$M, d$nyears, byrow=TRUE, dimnames=list(names, NULL)),
    Xphi = matrix(d$Xphi, d$M, d$nyears, dimnames=list(names, NULL)),
    Xgamma = matrix(d$Xgamma, d$M, d$nyears, dimnames=list(names, NULL)),
    Xauto = matrix(d$Xauto, d$M, d$nyears, dimnames=list(names, NULL)),
    Xautoobs = matrix(d$Xautoobs, d$M, d$nyears, dimnames=list(names, NULL))
  )

  # Create and return unmarked data frame for the colext function
  return(unmarked::unmarkedMultFrame(y=yy, siteCovs=siteCovs,
      yearlySiteCovs=yearlySiteCovs, obsCovs = list(Xp = Xp), numPrimary = d$nyears))
}
# --------------------------------------
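# --- Illustrative usage (a minimal sketch inside if(FALSE); the small grid, short
# --- range parameter and few years are illustrative settings chosen to keep the
# --- run fast and the circulant-embedding random field stable):
if(FALSE) {
  set.seed(1)
  dat <- simDynoccSpatial(side = 20, nyears = 5, nsurveys = 2, theta.XAC = 1,
      show.plots = FALSE, verbose = FALSE)
  dat$nocc    # true number of occupied cells per year
  dat$umf     # unmarked data frame from conv2UM, ready for unmarked::colext
}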
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/simDynoccSpatial_AHM2_9.R
# AHM2 chapter 4
# Revised 4 Dec 2018, 1 March 2019

# ------------------ Start function definition ---------------------
simDynocc <- function(nsites = 250, nyears = 10, nsurveys = 3, year.of.impact = NA,
    mean.psi1 = 0.4, beta.Xpsi1 = 0,
    range.phi = c(0.5, 1), sd.lphi.site = 0, impact.phi = 0, beta.Xphi = 0,
    range.gamma = c(0, 0.5), sd.lgamma.site = 0, impact.gamma = 0, beta.Xgamma = 0,
    sd.lphi.lgamma.site = 0,
    range.p = c(0.1, 0.9), beta.Xp = 0,
    range.beta1.survey = c(0, 0), range.beta2.survey = c(0, 0),
    trend.sd.site = c(0, 0), trend.sd.survey = c(0, 0),
    trend.sd.site.survey = c(0, 0), show.plots = TRUE) {
  #
  # Written by Marc Kery, 4 Dec 2014 - 4 December 2018
  #
  # Function to simulate detection/nondetection data under a general
  #   dynamic site-occ model, including:
  # * annual variation in the probabilities of site persistence, colonization
  #   and detection is specified by the bounds of a uniform distribution.
  # * one covariate is allowed to affect a parameter: a site covariate for psi1,
  #   a site-by-year covariate for phi and gamma and an
  #   observational covariate for p
  # * Additional detection heterogeneity at the site, survey (= occasion),
  #   or the site-by-survey level, with the possibility of a temporal trend in
  #   these types of heterogeneity over the years.
  #   For instance, an annual trend in detection heterogeneity at the site
  #   or the survey level is specified by different first and second values,
  #   which correspond to the heterogeneity in the first and the last year,
  #   with a linear trend interpolated for the intervening years.
  #   As an example, trend.sd.site = c(0, 1) will result in
  #   a linear trend in the magnitude of site-level heterogeneity in detection
  #   from 0 in the first year to 1 in the last year.
  # * Additional detection heterogeneity that varies over the season
  #   (= among occasions) according to a linear and quadratic occasion effect
  #   (e.g., to model the phenology of an insect species).
  # * Data simulation under a BACI (before-after-control-impact) design is
  #   possible, where an event happens in a specified year and
  #   reduces phi or gamma by a stated percentage (only reductions possible).
  #
  # Function arguments:
  # -------------------
  # ** Sample size arguments **
  # nsites: Number of sites
  # nyears: Number of years (or 'seasons')
  # nsurveys: Number of replicate surveys (= occasions) within a year
  #
  # ** Arguments to set intercepts of regressions **
  # mean.psi1 - average occupancy probability in first year
  # range.p - bounds of uniform distribution from which annual p drawn
  # range.phi and range.gamma - same for persistence (survival) and
  #   colonization probability
  # -------------------
  #
  # ** Arguments to set slopes of regressions **
  # beta.Xpsi1, beta.Xphi, beta.Xgamma, beta.Xp - covariate coefficient in
  #   prob. of initial occupancy, persistence, colonization and detection.
  # -------------------
  #
  # *** Arguments for detection heterogeneity among sites, occasions and site/survey
  # trend.sd.site: sd of normal distribution to model logit-normal noise in p
  #   at the site level in the first and the last year of the simulation,
  #   with values for intervening years interpolated.
  # trend.sd.survey: sd of normal distribution to model logit-normal noise in p
  #   ONLY at the rep = occasion = 'survey' level, in the first and
  #   the last year, with interpolation for intervening years
  # trend.sd.site.survey: sd of normal distribution to model logit-normal noise
  #   in p at the site/year/rep level, in the first and the
  #   last year, with interpolation in between
  # For these arguments, if the two values in the range are the
  #   same, a constant value is assumed over time, while if they are different,
  #   a linear trend is assumed over time.
  # range.beta1.survey is the range of the annual variation in the linear effect
  #   of the survey occasion (e.g., of month 1-12 when nsurveys = 12)
  #   on detection (= product of availability and perceptibility)
  # range.beta2.survey is the same for the quadratic effect of survey occasion
  # -------------------
  #
  # ** Arguments for the BACI design **
  # year.of.impact: year in which an impact happens,
  #   which can then affect phi and gamma; NA means no impact
  # impact.phi: negative effect in percent on annual phi, ignored if no impact;
  #   (e.g., impact.phi = 20 means a 20% reduction in phi)
  # impact.gamma: negative effect in percent on annual gamma, ignored if no impact.
  # Note that for the BACI design, nyears must be greater than 2 and
  #   year.of.impact must not be equal to the first or the last year.

  # Checks and fixes for input data -----------------------------
  nsites <- round(nsites[1])
  nyears <- round(nyears[1])
  nsurveys <- round(nsurveys[1])
  year.of.impact <- round(year.of.impact[1])
  if(!is.na(year.of.impact))
    stopifnotGreaterthan(nyears, 2)
  stopifnotBetween(year.of.impact, 2, nyears-1, allowNA=TRUE)
  stopifnotProbability(range.phi)   # bounds
  stopifnotBetween(impact.phi, 0, 100)
  stopifnotProbability(range.gamma) # bounds
  stopifnotBetween(impact.gamma, 0, 100)
  stopifnotProbability(range.p)     # bounds
  stopifnotLength(trend.sd.site, 2)   # trend
  stopifNegative(trend.sd.site)
  stopifnotLength(trend.sd.survey, 2) # trend
  stopifNegative(trend.sd.survey)
  stopifnotLength(trend.sd.site.survey, 2) # trend
  stopifNegative(trend.sd.site.survey)
  # ----------------------------------------------------------------

  # Set up arrays needed
  site <- 1:nsites      # Sites
  year <- 1:nyears      # Years
  visit <- 1:nsurveys   # Surveys (= months, visits, occasions)
  psi <- muZ <- z <- array(dim = c(nsites, nyears),
      dimnames = list(paste('Site', site, sep = ''),
          paste('Year', year, sep = '')))   # Occupancy, occurrence
  phi <- gamma <- array(NA, dim = c(nsites, (nyears-1)),
      dimnames = list(paste('Site', site, sep = ''),
          paste('Year', year[-nyears], sep = '')))   # Survival, colonisation
  y <- p <- array(NA, dim = c(nsites, nsurveys, nyears),
      dimnames = list(paste('Site', site, sep = ''),
          paste('Visit', visit, sep = ''),
          paste('Year', year, sep = '')))   # Det. hist and p

  # Create covariates
  # Site covariate for psi1
  Xpsi1 <- runif(nsites, -2, 2)
  # Yearly-site covariates for phi and gamma
  Xphi <- array(runif(nsites*nyears, -2, 2), dim = c(nsites, nyears))
  Xgamma <- array(runif(nsites*nyears, -2, 2), dim = c(nsites, nyears))
  # Observational covariate for p
  Xp <- array(runif(nsites*nsurveys*nyears, -2, 2), dim=c(nsites, nsurveys, nyears))

  # Create impact covariate for the BACI effect on phi and gamma
  impact <- rep(0, (nyears-1))
  if(!is.na(year.of.impact))
    impact[year.of.impact:(nyears-1)] <- 1

  # (1) Simulate all parameter values
  # (a) State process parameters
  psi[,1] <- plogis(qlogis(mean.psi1) + beta.Xpsi1 * Xpsi1)
  mean.phi <- runif(n = nyears-1, min = min(range.phi), max = max(range.phi))
  mean.gamma <- runif(n = nyears-1, min = min(range.gamma), max = max(range.gamma))
  # new 2019-03-01: heterogeneity in phi and gamma across sites
  eps.lphi.site <- rnorm(n = nsites, mean = 0, sd = sd.lphi.site)
  eps.lgamma.site <- rnorm(n = nsites, mean = 0, sd = sd.lgamma.site)
  eps.lphi.lgamma.site <- rnorm(n = nsites, mean = 0, sd = sd.lphi.lgamma.site)
  # BACI effect on phi and gamma: negative effect on persistence/colonisation
  BACI.effect.phi <- (mean.phi * (impact.phi/100) * impact)
  BACI.effect.gamma <- (mean.gamma * (impact.gamma/100) * impact)
  # (These will be zero if there is no BACI impact)
  # Assemble effects of year, impact and covariates on phi and gamma
  for(t in 1:(nyears-1)){
    phi[,t] <- plogis(qlogis(mean.phi[t] - BACI.effect.phi[t]) + eps.lphi.site +
        eps.lphi.lgamma.site + beta.Xphi * Xphi[,t])
    gamma[,t] <- plogis(qlogis(mean.gamma[t] - BACI.effect.gamma[t]) + eps.lgamma.site +
        eps.lphi.lgamma.site + beta.Xgamma * Xgamma[,t])
  }

  # (b) Observation process parameters
  mean.p <- runif(n = nyears, min = min(range.p), max = max(range.p))
  beta1 <- runif(n = nyears, min = min(range.beta1.survey), max = max(range.beta1.survey))
  beta2 <- runif(n = nyears, min = min(range.beta2.survey), max = max(range.beta2.survey))
  # Next two allow incorporation of trend over time
  sd.site <- seq(from = trend.sd.site[1], to = trend.sd.site[2], length.out = nyears)
  sd.survey <- seq(from = trend.sd.survey[1], to = trend.sd.survey[2], length.out = nyears)
  sd.site.survey <- seq(from = trend.sd.site.survey[1], to = trend.sd.site.survey[2],
      length.out = nyears)
  # Define and fill the array of site/survey random effects
  eps3 <- array(dim = c(nsites, nsurveys, nyears))   # ZM site.survey ranef.
  for(t in 1:nyears){
    eps3[,,t] <- matrix(rnorm(n = nsites*nsurveys, sd = sd.site.survey[t]), ncol = nsurveys)
  }
  for(t in 1:nyears){                               # Years
    eps1 <- rnorm(n = nsites, sd = sd.site[t])      # Zero-mean site random eff.
    eps2 <- rnorm(n = nsurveys, sd = sd.survey[t])  # Zero-mean survey random eff.
    # (eps1 and eps2 are drawn once per year, so that site effects are shared
    #   across surveys and survey effects are shared across sites, as the model
    #   description above implies; eps3 holds the site-by-survey effects)
    for(i in 1:nsites){        # Sites
      for(j in 1:nsurveys){    # Months
        p[i,j,t] <- plogis(qlogis(mean.p[t]) + beta.Xp*Xp[i,j,t] +
            eps1[i] + eps2[j] + eps3[i,j,t] +
            beta1[t] * (j - (nsurveys/2)) + beta2[t] * (j - (nsurveys/2))^2)
      }
    }
  }

  # (2) Simulate the true system dynamics (state process)
  # First year
  z[,1] <- rbinom(nsites, 1, psi[,1])   # Initial occurrence state
  # Years 2:nyears
  for(i in 1:nsites){      # Loop over sites
    for(t in 2:nyears){    # Loop over years
      muZ[i,t] <- z[i, t-1]*phi[i,t-1] + (1-z[i, t-1])*gamma[i,t-1]
      z[i,t] <- rbinom(1, 1, muZ[i,t])
    }
  }

  # (3) Simulate observation process to get the observed data
  for(i in 1:nsites){        # Loop over sites
    for(t in 1:nyears){      # Loop over years
      for(j in 1:nsurveys){  # Loop over replicates
        prob <- z[i,t] * p[i,j,t]   # zero out p for unoccupied sites
        y[i,j,t] <- rbinom(1, 1, prob)
      }
    }
  }

  # (4) Compute annual population occupancy
  for(i in 1:nsites){
    for (t in 2:nyears){
      psi[i,t] <- psi[i,t-1]*phi[i,t-1] + (1-psi[i,t-1])*gamma[i,t-1]
    }
  }
  n.occ <- apply(z, 2, sum)                    # True number of occupied sites
  n.occ.ever <- sum(apply(z, 1, max))          # True number of sites ever occupied
  zobs <- apply(y, c(1,3), max)                # Observed presence-absence matrix
  n.occ.obs <- apply(zobs, 2, sum)             # Observed number of occupied sites
  n.occ.ever.obs <- sum(apply(zobs, 1, max))   # Observed number of sites ever occupied
  psi.fs <- apply(z, 2, mean)                  # Finite-sample occupancy proportion
  mean.psi <- apply(psi, 2, mean)              # Average psi over sites
  psi.app <- apply(apply(y, c(1,3), max), 2, mean)   # Apparent occupancy (finite sample)

  # Compute average product of availability and detection in each occasion
  #   (ignoring the other terms in the model for detection)
  p.occasion <- array(NA, dim = c(nsurveys, nyears))
  for(t in 1:nyears){   # Years
    p.occasion[,t] <- plogis(qlogis(mean.p[t]) + beta1[t] * (visit - (nsurveys/2)) +
        beta2[t] * (visit - (nsurveys/2))^2)
  }

  # Compute annual average of phi, gamma and p
  avg.phi <- colMeans(phi)
  avg.gamma <- colMeans(gamma)
  avg.p <- colMeans(apply(p, c(1,3), mean))

  # (5) Plots of stuff
  if(show.plots){
    # Restore graphical settings on exit
    oldpar <- par("mfrow", "mar", "cex.lab", "cex.axis")
    oldAsk <- devAskNewPage(ask = dev.interactive(orNone=TRUE))
    on.exit({par(oldpar); devAskNewPage(oldAsk)})

    tryPlot <- try( {
      # ------ Plot A ---------
      par(mfrow = c(2, 2), mar = c(5,5,5,3), cex.lab = 1.2, cex.axis = 1.2)
      # Get predicted covariate relationships and plot them in a single graph
      pred.cov <- seq(-2, 2, length.out = 100)
      psi.pred <- plogis(qlogis(mean.psi1) + beta.Xpsi1 * pred.cov)
      phi.pred <- plogis(mean(qlogis(mean.phi)) + beta.Xphi * pred.cov)
      gamma.pred <- plogis(mean(qlogis(mean.gamma)) + beta.Xgamma * pred.cov)
      p.pred <- plogis(mean(qlogis(mean.p)) + beta.Xp * pred.cov)
      plot(pred.cov, psi.pred, type = 'n', xlim = c(-2, 2), ylim = c(0,1),
          main = 'Covariate relationships', xlab = 'Covariate value',
          ylab = 'Predicted probability', frame = FALSE, las=1)
      lines(pred.cov, psi.pred, type = 'l', col = 3, lwd = 3, lty=1)
      lines(pred.cov, phi.pred, type = 'l', col = 4, lwd = 3, lty=2)
      lines(pred.cov, gamma.pred, type = 'l', col = 1, lwd = 3, lty=3)
      lines(pred.cov, p.pred, type = 'l', col = 2, lwd = 1, lty=1)
      legend('top', legend = c('psi1', 'phi', 'gamma', 'p'), col = c(3,4,1,2),
          lty = c(1,2,3,1), lwd = c(3,3,3,1), inset=c(0, -0.15), bty='n',
          xpd=NA, horiz=TRUE)

      # Within-season pattern of detection (= product of availability and detection)
      #   (ignoring the other terms in the model for detection)
      matplot(visit, p.occasion, type = 'l', lty = 1, lwd = 2,
          main = 'Within-season pattern in p over the years \n(only occasion terms)',
          xlab =
'Survey', ylab = 'Detection probability', ylim = c(0,1), frame = FALSE, xaxt='n') axis(1, at=1:nsurveys) # Histogram of detection hist(p, col = 'lightgrey', xlim = c(0,1), main = 'Detection probability p') # Plot realised and apparent proportion of occupied sites plot(year, avg.p, type = "n", xlab = "Year", ylab = "Probability", xlim = c(1,nyears), ylim = c(0,1), frame.plot = FALSE, las = 1, xaxt='n', main = 'True occupancy (finite-sample), \nobserved occupancy (prop. sites occupied) and average p') axis(1, 1:nyears) lines(year, apply(z, 2, mean), type = "l", col = 2, lwd = 2, lty = 1) lines(year, psi.app, type = "l", col = 1, lwd = 2, lty=2) lines(year, avg.p , type = "l", col = 2, lwd = 2, lty = 3) if(!is.na(year.of.impact)) { abline(v=year.of.impact+0.5, col='grey', lwd=2) text(year.of.impact+0.5, 0, "impact", adj=c(0.5, 0.5))#pos=1, offset=0) } legend('top', legend = c('True psi', 'Observed psi', 'Detection'), col = c(2,1,2), lty = c(1,2,3), lwd = 2, inset=c(0, -0.15), bty='n', xpd=NA, horiz=TRUE) # ------ Plot B --------- # Plot of population sizes (ever occupied, occupied per year, true and observed) # And comparison with annual vals of colonisation, persistence and detection par(mfrow = c(1, 2), mar = c(5,5,5,3), cex.lab = 1.2, cex.axis = 1.2) # Annual average of colonisation, persistence and detection plot(1:(nyears-1), avg.gamma, type = "n", xlab = "Year or Yearly interval", ylab = "Probability", xlim = c(0.5, nyears), ylim = c(0,1), las = 1, xaxt='n', frame.plot = FALSE, main = 'Average annual persistence,\ncolonization, and detection') axis(1, at=1:nyears) lines(1:(nyears-1), avg.phi, type = "o", pch=16, col = 4, lwd = 2, lty=3) lines(1:(nyears-1), avg.gamma, type = "o", pch=16, col = 1, lwd = 2, lty=2) lines(1:nyears, avg.p, type = "o", pch=16, col = 2, lwd = 2) if(!is.na(year.of.impact)) { abline(v=year.of.impact-0.5, col='grey', lwd=2) text(year.of.impact-0.5, 0, "impact", pos=1, offset=0) } legend('top', c("phi", "gamma", "p"), lty=c(3,2,1), lwd=2, col=c(4,1,2), inset=c(0, -0.05), bty='n', xpd=NA, horiz=TRUE) # True and observed number of occupied sites per year and overall (ever) plot(1, 1, type = "n", xlab = "Year", ylab = "Number of sites", xlim = c(0.5, nyears+1.5), xaxt='n', ylim = range(c(0, n.occ.ever, n.occ.obs)), frame.plot = FALSE, las = 1, main = 'True and obs. number of occupied sites \n per year and for all years combined (ever)') axis(1, at=1:nyears) end <- nyears/2 + 0.5 mid <- mean(c(0.5, end)) segments(0.5, n.occ.ever, end, n.occ.ever, lwd=2, col=2) text(mid, n.occ.ever, "True ever", pos=3, xpd=TRUE) segments(0.5, n.occ.ever.obs, end, n.occ.ever.obs, lwd=2, lty=2) text(mid, n.occ.ever.obs, "Observed ever", pos=1, xpd=TRUE) points(1:nyears, n.occ, type = "b", col = 2, pch = 16, cex = 1.5, lwd=3) points(1:nyears, n.occ.obs, type = "b", col = 1, pch=20, lty=2, cex = 1.5, lwd=3) if(!is.na(year.of.impact)) { abline(v=year.of.impact+0.5, col='grey', lwd=2) text(year.of.impact+0.5, 0, "impact", pos=1, offset=0) } legend('topright', legend = c('True annual', 'Obs. 
annual'), col = c(2,1), pch = c(16, 16), lty = c(1,1), pt.cex=1.5, lwd = 3, bty = 'n') }, silent = TRUE) if(inherits(tryPlot, "try-error")) tryPlotError(tryPlot) } # Return data return(list(nsites=nsites, nyears=nyears, nsurveys=nsurveys,year.of.impact = year.of.impact, impact = impact, mean.psi1=mean.psi1, beta.Xpsi1=beta.Xpsi1, range.phi=range.phi, sd.lphi.site = sd.lphi.site, impact.phi = impact.phi, beta.Xphi=beta.Xphi, BACI.effect.phi = BACI.effect.phi, range.gamma=range.gamma, sd.lgamma.site = sd.lgamma.site, impact.gamma = impact.gamma, beta.Xgamma=beta.Xgamma, sd.lphi.lgamma.site = sd.lphi.lgamma.site, BACI.effect.gamma = BACI.effect.gamma, range.p=range.p, beta.Xp=beta.Xp, trend.sd.site=trend.sd.site, trend.sd.survey=trend.sd.survey, range.beta1.survey = range.beta1.survey, range.beta2.survey = range.beta2.survey, beta1 = beta1, beta2 = beta2, p.occasion = p.occasion,sd.site=sd.site, sd.survey=sd.survey, mean.phi=mean.phi, mean.gamma=mean.gamma, mean.p=mean.p, avg.phi=avg.phi, avg.gamma=avg.gamma, avg.p=avg.p, psi=psi, mean.psi=mean.psi, n.occ = n.occ, n.occ.ever = n.occ.ever, n.occ.obs = n.occ.obs, n.occ.ever.obs = n.occ.ever.obs, psi.fs = psi.fs, psi.app=psi.app, z=z, phi=phi, gamma=gamma, p=p, y = y, Xpsi1 = Xpsi1, Xphi = Xphi, Xgamma = Xgamma, Xp = Xp, eps.lphi.site = eps.lphi.site, eps.lgamma.site = eps.lgamma.site, eps.lphi.lgamma.site = eps.lphi.lgamma.site, eps1 = eps1, eps2 = eps2, eps3 = eps3)) } # ------------------ End function definition ---------------------
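# Illustrative use: a minimal sketch with arbitrary values; all arguments not
# shown are assumed to keep their defaults. Wrapped in if(FALSE) so that
# sourcing this file has no side effects.
if(FALSE) {
  tmp <- simDynocc(nsites = 50, nyears = 10, nsurveys = 3,
                   year.of.impact = 5, impact.phi = 20, impact.gamma = 20,
                   show.plots = FALSE)
  str(tmp$z)    # true occupancy states, sites x years
  tmp$n.occ     # true number of occupied sites per year
}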
# ---------------- End of file: AHMbook/R/simDynocc_AHM2_3_Simulate_dynamic_occupancy.R ----------------
# Define function for simulating spatially correlated random field
# AHM2 - 9.2

# Called by functions `simNmixSpatial` and `simOccSpatial`.

# Modified to use package 'fields' if 'RandomFields' is not available.
# In DESCRIPTION file:
# - add 'fields' to Imports
# - move RandomFields from Imports to Suggests
# - 8-21-2023: RandomFields completely removed as it does not seem to be coming back to CRAN
# In NAMESPACE file:
# - comment out or delete importFrom("RandomFields", "RFoptions", "RFsimulate", "RMexp")
# - add: importFrom("fields", "circulantEmbeddingSetup", "circulantEmbedding")

# ------------ Start of function definition ----------------
simExpCorrRF <- function(variance = 1, theta = 1, size = 50, seed = NA, show.plots = TRUE){
  # Function creates a Gaussian random field with negative
  # exponential correlation and visualizes correlation and random field
  #
  # Function arguments:
  # theta: parameter governing spatial correlation (= 1/phi)
  #   ("large theta means long range of correlation")
  #   Note that RMexp is specified in terms of phi = 1/theta
  # variance: variance of field, set at 1
  # size: number of pixels in either direction
  # show.plots: if TRUE, plots of the data will be displayed;
  #   set to FALSE if you are running simulations or use inside other functions.

  # Generate correlated random variables in a square
  step <- 1
  x <- seq(1, size, step)
  y <- seq(1, size, step)
  # grid <- as.matrix(expand.grid(x,y))
  grid <- cbind(x = rep(x, each=size), y = y)

  # RandomFields now completely removed from package
  #if(requireNamespace("RandomFields", quietly=TRUE)) {
  #  RandomFields::RFoptions(seed=seed)
  #  field <- RandomFields::RFsimulate(RandomFields::RMexp(var = variance, scale = theta),
  #      x=x, y=y, grid=TRUE)@data$variable1
  #  RandomFields::RFoptions(seed=NA)
  #} else {
  message("Using package 'fields' instead of 'RandomFields'; see help(simExpCorrRF).")
  if(!is.na(seed))
    set.seed(seed)  # Only for compatibility with RandomFields; better to set the seed before calling simExpCorrRF
  obj <- circulantEmbeddingSetup(grid=list(x=x, y=y), Covariance="Exponential", aRange=theta)
  tmp <- try(circulantEmbedding(obj), silent=TRUE)
  if(inherits(tmp, "try-error"))
    stop("Simulation of random field failed.\nTry with smaller values for 'size' or 'theta'.")
  field <- as.vector(tmp * sqrt(variance))
  #}

  # Plots
  # Correlation function
  if(show.plots){
    oldpar <- par(mfrow = c(1,2), mar = c(5,5,4,2), "cex.main") ; on.exit(par(oldpar))
    tryPlot <- try( {
      dis <- seq(0.01, 20, by = 0.01)
      corr <- exp(-dis/theta)
      plot(dis, corr, type = "l", xlab = "Distance", ylab = "Correlation",
          ylim = c(0,1), col = "blue", lwd = 2)
      text(0.8*max(dis), 0.8, labels = paste("theta:", theta))

      # Random field
      # image(x, y, field, col=topo.colors(20),
      #     main = paste("Gaussian random field with \n negative exponential correlation (theta =", theta, ")"),
      #     cex.main = 1)
      par(mar = c(3,2,5,1))
      raster::plot(rasterFromXYZ(cbind(grid, field)), col=topo.colors(20),
          main = paste("Gaussian random field with \n negative exponential correlation (theta =", theta, ")"),
          cex.main = 1, legend=FALSE, box=FALSE)
      box()
    }, silent = TRUE)
    if(inherits(tryPlot, "try-error"))
      tryPlotError(tryPlot)
  }

  # Output
  return(list(variance = variance, theta = theta, size = size, seed = seed,
      field = field, grid = grid))
} # ------------ End of function definition ----------------
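# Illustrative use (a minimal sketch with arbitrary values; needs package 'fields'):
if(FALSE) {
  rf <- simExpCorrRF(variance = 1, theta = 10, size = 50, seed = 42,
                     show.plots = FALSE)
  str(rf$field)   # size^2 values of the correlated Gaussian field
  head(rf$grid)   # matching x/y pixel coordinates
}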
# ---------------- End of file: AHMbook/R/simExpCorrRF.R ----------------
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM) # Marc Kery & Andy Royle, Academic Press, 2016. # sim.fn - AHM1 section 1.1 p4 # A function to help to understand the relationship between point patterns, # abundance data and occurrence data (also called presence/absence or distribution data) # (introduced in AHM1 Section 1.1) sim.fn <- function(quad.size = 10, cell.size = 1, intensity = 1, show.plot = TRUE){ # # Function that simulates animal or plant locations in space according # to a homogenous Poisson process. This process is characterized by the # intensity, which is the average number of points per unit area. # The resulting point pattern is then discretized to obtain abundance data and # presence/absence (or occurrence) data. The discretization of space is # achieved by choosing the cell size. # Note that you must choose cell.size such that the ratio of quad.size # to cell.size is an integer. # Argument show.plot should be set to FALSE when running simulations # to speed things up. # Compute some preliminaries exp.M <- intensity * quad.size^2 # Expected population size in quadrat breaks <- seq(0, quad.size, cell.size) # boundaries of grid cells n.cell <- (quad.size / cell.size)^2 # Number of cells in the quadrat mid.pt <- breaks[-length(breaks)] + 0.5 * cell.size # cell mid-points # Simulate three processes: point process, cell abundance summary and cell occurrence summary # (1) Generate and plot the mother of everything: point pattern M <- rpois(1, exp.M) # Realized population size in quadrat is Poisson u1 <- runif(M, 0, quad.size) # x coordinate of each individual u2 <- runif(M, 0, quad.size) # y coordinate of each individual # (2) Generate abundance data # Summarize point pattern per cell: abundance (N) is number of points per cell N <- as.matrix(table(cut(u1, breaks=breaks), cut(u2, breaks= breaks))) lambda <- round(mean(N),2) # lambda: average realized abundance per cell var <- var(c(N)) # Spatial variance of N # (3) Generate occurrence (= presence/absence) data # Summarize point pattern even more: # occurrence (z) is indicator for abundance greater than 0 z <- N ; z[z>1] <- 1 # Convert abundance info to presence/absence info psi <- mean(z) # Realized occupancy in sampled sites # Visualisation if(show.plot){ op <- par(mfrow = c(2, 2), mar = c(5,5,5,2), cex.lab = 1.5, cex.axis = 1.3, cex.main = 1.3) on.exit(par(op)) tryPlot <- try( { # (1) Visualize point pattern plot(u1, u2, xlab = "x coord", ylab = "y coord", cex = 1, pch = 16, asp = 1, main = paste("Point pattern: \nIntensity =", intensity, ", M =", M, "inds."), xlim = c(0, quad.size), ylim = c(0, quad.size), frame = FALSE, col = "red") # plot point pattern polygon(c(0, quad.size, quad.size, 0), c(0,0, quad.size, quad.size), lwd = 3, col = NA, border = "black") # add border to grid boundary # (2) Visualize abundance pattern # Plot gridded point pattern with abundance per cell plot(u1, u2, xlab = "x coord", ylab = "y coord", cex = 1, pch = 16, asp = 1, main = paste("Abundance pattern: \nRealized mean density =", lambda, "\nSpatial variance =", round(var,2)), xlim = c(0, quad.size), ylim = c(0, quad.size), frame = FALSE, col = "red") # plot point pattern # Overlay grid onto study area for(i in 1:length(breaks)){ for(j in 1:length(breaks)){ segments(breaks[i], breaks[j], rev(breaks)[i], breaks[j]) segments(breaks[i], breaks[j], breaks[i], rev(breaks)[j]) } } # Print abundance (N) into each cell for(i in 1:length(mid.pt)){ for(j in 1:length(mid.pt)){ text(mid.pt[i],mid.pt[j],N[i,j],cex 
=10^(0.8-0.4*log10(n.cell)),col="blue") } } polygon(c(0, quad.size, quad.size, 0), c(0,0, quad.size, quad.size), lwd = 3, col = NA, border = "black") # add border to grid boundary # (3) Visualize occurrence (= presence/absence) pattern # Summarize point pattern even more: # occurrence (z) is indicator for abundance greater than 0 plot(u1, u2, xlab = "x coord", ylab = "y coord", cex = 1, pch = 16, asp = 1, main = paste("Occurrence pattern: \nRealized occupancy =", round(psi,2)), xlim = c(0, quad.size), ylim = c(0, quad.size), frame = FALSE, col = "red") # plot point pattern # Overlay grid onto study area for(i in 1:length(breaks)){ for(j in 1:length(breaks)){ segments(breaks[i], breaks[j], rev(breaks)[i], breaks[j]) segments(breaks[i], breaks[j], breaks[i], rev(breaks)[j]) } } # Shade occupied cells (which have abundance N > 0 or occurrence z = 1) for(i in 1:(length(breaks)-1)){ for(j in 1:(length(breaks)-1)){ polygon(c(breaks[i], breaks[i+1], breaks[i+1], breaks[i]), c(breaks[j], breaks[j], breaks[j+1], breaks[j+1]), col = "black", density = z[i,j]*100) } } polygon(c(0, quad.size, quad.size, 0), c(0,0, quad.size, quad.size), lwd = 3, col = NA, border = "black") # add border to grid boundary # (4) Visualize abundance distribution across sites # plot(table(N), xlab = "Abundance (N)", ylab = "Number of cells", # col = "black", xlim = c(0, max(N)), main = "Frequency of N with mean density (blue)", lwd = 3, frame = FALSE) histCount(N, NULL, xlab = "Abundance (N)", ylab = "Number of cells", color = "grey", main = "Frequency of N with mean density (blue)") abline(v = lambda, lwd = 3, col = "blue", lty=2) }, silent = TRUE ) if(inherits(tryPlot, "try-error")) tryPlotError(tryPlot) } # Numerical output return(list(quad.size = quad.size, cell.size = cell.size, intensity = intensity, exp.N = exp.M, breaks = breaks, n.cell = n.cell, mid.pt = mid.pt, M = M, u1 = u1, u2 = u2, N = N, z = z, psi = psi)) }
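# Illustrative use (a minimal sketch with arbitrary values; note that
# quad.size/cell.size must be an integer):
if(FALSE) {
  tmp <- sim.fn(quad.size = 10, cell.size = 1, intensity = 0.5, show.plot = FALSE)
  tmp$M     # realised population size in the quadrat
  tmp$psi   # realised occupancy over the n.cell grid cells
}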
# ---------------- End of file: AHMbook/R/simFn_AHM1_1-1_Simulate_Poisson_process.R ----------------
# Function for section 2.9.1, Diseased Frogs simFrogDisease <- function(nsites = 100, nyears = 3, nsurveys = 3, alpha.lam = 3, # Mean abundance at t=1 omega = c(0.9, 0.7), # State-specific survival gamma = c(2,1), # State-specific recruitment p = c(0.8, 0.8, 0.8), # Detection probability recovery = 0.1, # Pr recovery given diseased infection = 0.1){ # Pr infection given not diseased # Empty matrices to hold the data yN <- yI <- array(NA, dim = c(nsites, nyears, nsurveys)) NI <- NN <- array(NA, dim = c(nsites, nsurveys)) # First season NN[,1] <- rpois(n = nsites, lambda = alpha.lam) NI[,1] <- rpois(n = nsites, lambda = alpha.lam) for(i in 1:nsites){ for(j in 1:nyears){ yN[i,j, 1] <- rbinom(n = 1, NN[i,1], p[1]) yI[i,j, 1] <- rbinom(n = 1, NI[i,1], p[1]) } } SN <- SI <- GI <- GN <- TrN <- TrI <- array(0, dim = c(nsites, nsurveys-1)) # Second and subsequent seasons for(k in 2:nsurveys){ for(i in 1:nsites){ if(NN[i,k-1]>0){ SN[i, k-1] <- rbinom(n=1, size=NN[i,k-1], prob=omega[1]) # Survival of uninfecteds TrN[i,k-1] <- rbinom(n=1, size=SN[i,k-1], prob=infection) # Getting infected - lost from NN, and gained by NI } if(NI[i,k-1]>0){ SI[i, k-1] <- rbinom(n=1, size=NI[i,k-1], prob=omega[2]) # Survival of infecteds TrI[i, k-1] <- rbinom(n=1, size=SI[i,k-1], prob=recovery) # Losing infection - lost from NI and gained by NN } # Recruitment GI[i, k-1] <- rpois(1, lambda = gamma[2]) GN[i, k-1] <- rpois(1, lambda = gamma[1]) } # Total population size NI[,k] <- SI[,k-1] + GI[,k-1] + TrN[,k-1] - TrI[,k-1] NN[,k] <- SN[,k-1] + GN[,k-1] + TrI[,k-1] - TrN[,k-1] } for(i in 1:nsites){ for(j in 1:nyears){ for(k in 2:nsurveys){ yN[i, j, k] <- rbinom(n = 1, NN[i,k], p[k]) yI[i, j, k] <- rbinom(n = 1, NI[i,k], p[k]) } } } return(list( # --------------- arguments input ------------------------ nsites = nsites, nyears = nyears, nsurveys = nsurveys,alpha.lam= alpha.lam,omega = omega,gamma = gamma, infection = infection, recovery = recovery, # ---------------- generated values ---------------------- SN = SN, # sites x intervals, number of noninfected frogs surviving SI = SI, # sites x intervals, number of infected frogs surviving GN = GN, # sites x intervals, number of noninfected frogs recruited GI = GI, # sites x intervals, number of infected frogs recruited TrI = TrI, # sites x intervals, number of infected frogs recovering TrN = TrN, # sites x intervals, number of noninfected frogs becoming infected NN = NN, # sites x years, number of noninfected frogs in the population NI = NI, # sites x years, number of infected frogs in the population p = p, # length nyears, probability of detection yN = yN, # sites x years x surveys, number of noninfected frogs detected yI = yI)) # sites x years x surveys, number of infected frogs detected }
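# Illustrative use (a minimal sketch with the default parameter values):
if(FALSE) {
  tmp <- simFrogDisease(nsites = 100, nyears = 3, nsurveys = 3,
                        alpha.lam = 3, omega = c(0.9, 0.7), gamma = c(2, 1))
  str(tmp$NN)   # uninfected frogs per site and season
  str(tmp$yI)   # counts of infected frogs detected
}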
# ---------------- End of file: AHMbook/R/simFrogDisease.R ----------------
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM) # Marc Kery & Andy Royle, Academic Press, 2016. # simHDS - AHM1 section 8.5.1 p444 # Function to simulate data under hierarchical distance sampling protocol (line or point) # (introduced in AHM1 Section 8.5.1) simHDS <- function(type=c("line", "point"), nsites = 100, mean.lambda = 2, beta.lam = 1, mean.sigma = 1, beta.sig = -0.5, B = 3, discard0=TRUE, show.plot=TRUE){ # # Function simulates hierarchical distance sampling (HDS) data under # either a line (type = "line") or a point (type = "point") # transect protocol. # Function arguments: # nsites: Number of sites (spatial replication) # alpha.lam (= log(mean.lambda)), beta.lam: intercept and # slope of log-linear regression of expected lambda # on a habitat covariate # alpha.sig (= log(mean.sigma)), beta.sig: intercept and # slope of log-linear regression of scale parameter of # half-normal detection function on wind speed # B: strip half width # Checks and fixes for input data ----------------------------- nsites <- round(nsites[1]) stopifNegative(mean.lambda, allowZero=FALSE) stopifNegative(mean.sigma, allowZero=FALSE) stopifNegative(B, allowZero=FALSE) # -------------------------------------------- type <- match.arg(type) # Get covariates habitat <- rnorm(nsites) # habitat covariate wind <- runif(nsites, -2, 2) # wind covariate # Simulate abundance model (Poisson GLM for N) lambda <- exp(log(mean.lambda) + beta.lam*habitat) # density per "square" N <- rpois(nsites, lambda) # site-specific abundances N.true <- N # for point: inside B # Detection probability model (site specific) sigma <- exp(log(mean.sigma) + beta.sig*wind) # Simulate observation model data <- NULL for(i in 1:nsites){ if(N[i]==0){ data <- rbind(data, c(i,NA,NA,NA,NA)) # save site, y=1, u, v, d next } if(type=="line"){ # Simulation of distances, uniformly, for each ind. in pop. # note it piles up all N[i] guys on one side of the transect d <- runif(N[i], 0, B) p <- exp(-d *d / (2 * (sigma[i]^2))) # Determine if individuals are captured or not y <- rbinom(N[i], 1, p) u <- v <- rep(NA, N[i]) # coordinates (u,v) # Subset to "captured" individuals only d <- d[y==1] u <- u[y==1] v <- v[y==1] y <- y[y==1] } if(type=="point"){ # Simulation data on a square u <- runif(N[i], 0, 2*B) v <- runif(N[i], 0, 2*B) d <- sqrt((u-B)^2 + (v-B)^2) N.true[i] <- sum(d<= B) # Population size inside of count circle # Can only count indidividuals in the circle, so set to zero p # of individuals in the corners (thereby truncating them) p <- exp(-d *d / (2 * (sigma[i]^2))) # Detection probabiilty .. pp <- ifelse(d <= B, 1, 0) * p # ... times "inside or outside" y <- rbinom(N[i], 1, pp) # Det./non-detection of each individual # Subset to "captured" individuals only u <- u[y==1] v <- v[y==1] d <- d[y==1] y <- y[y==1] } # Compile things into a matrix and insert NA if no individuals were # captured at site i. Coordinates (u,v) are not used here. if(sum(y) > 0) data <- rbind(data, cbind(rep(i, sum(y)), y, u, v, d)) else data <- rbind(data, c(i,NA,NA,NA,NA)) # make a row of missing data } colnames(data) <- c("site", "y", "u", "v", "d") # name 1st col "site" # Subset to sites at which individuals were captured. You may or may not # want to do this depending on how the model is formulated so be careful. 
if(discard0) data <- data[!is.na(data[,2]),] # Visualisation if(show.plot) { if(type=="line"){ # For line transect op <- par(mfrow = c(1, 3)) ; on.exit(par(op)) tryPlot <- try( { hist(data[,"d"], col = "lightblue", breaks = 20, main = "Frequency of distances", xlab = "Distance") ttt <- table(data[,1]) n <- rep(0, nsites) n[as.numeric(rownames(ttt))] <- ttt plot(habitat, n, main = "Observed counts (n) vs. habitat") plot(wind, n, main = "Observed counts (n) vs. wind speed") }, silent = TRUE) if(inherits(tryPlot, "try-error")) tryPlotError(tryPlot) } if(type=="point"){ # For point transect op <- par(mfrow = c(2,2)) ; on.exit(par(op)) tryPlot <- try( { plot(data[,"u"], data[,"v"], pch = 16, main = "Located individuals in point transects", xlim = c(0, 2*B), ylim = c(0, 2*B), col = data[,1], asp = 1) points(B, B, pch = "+", cex = 3, col = "black") plotrix::draw.circle(B, B, B) hist(data[,"d"], col = "lightblue", breaks = 20, main = "Frequency of distances", xlab = "Distance") ttt <- table(data[,1]) n <- rep(0, nsites) n[as.numeric(rownames(ttt))] <- ttt plot(habitat, n, main = "Observed counts (n) vs. habitat") plot(wind, n, main = "Observed counts (n) vs. wind speed") }, silent = TRUE) if(inherits(tryPlot, "try-error")) tryPlotError(tryPlot) } } # Output list(type = type, nsites = nsites, mean.lambda = mean.lambda, beta.lam = beta.lam, mean.sigma = mean.sigma, beta.sig = beta.sig, B = B, data=data, habitat=habitat, wind=wind, N = N, N.true = N.true ) }
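# Illustrative use (a minimal sketch with arbitrary values):
if(FALSE) {
  tmp <- simHDS(type = "point", nsites = 100, mean.lambda = 2, beta.lam = 1,
                mean.sigma = 1, beta.sig = -0.5, B = 3, show.plot = FALSE)
  head(tmp$data)   # one row per detection: site, y, u, v, d
  sum(tmp$N.true)  # true number of individuals inside the count circles
}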
# ---------------- End of file: AHMbook/R/simHDS_AHM1_8-5-1_Simulate_hierarch_distance_sampling.R ----------------
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM) # Marc Kery & Andy Royle, Academic Press, 2016. # simHDSg - AHM1 section 9.2.1 p466 # Function to simulate data under HDS protocol with groups # (introduced in AHM1 Section 9.2.1) simHDSg <- function(type = c("line", "point"), nsites = 100, lambda.group = 0.75, alpha0 = 0, alpha1 = 0.5, beta0 = 1, beta1 = 0.5, B = 4, discard0 = TRUE, show.plot=TRUE){ # # Function simulates hierarchical distance sampling (HDS) data for groups under # either a line (type = "line") or a point (type = "point") transect protocol # and using a half-normal detection function (Buckland et al. 2001). # Other function arguments: # nsites: Number of sites (spatial replication) # lambda.group: Poisson mean of group size # alpha0, alpha1: intercept and slope of log-linear model relating sigma of # half-normal detection function to group size # beta0, beta1: intercept and slope of log-linear model relating the Poisson # mean of the number of groups per unit area to habitat # B: strip half width # # Checks and fixes for input data ----------------------------- nsites <- round(nsites[1]) stopifNegative(lambda.group, allowZero=FALSE) stopifNegative(B, allowZero=FALSE) # -------------------------------------------- type <- match.arg(type) # Get covariates habitat <- rnorm(nsites) # Simulated covariate # Simulate abundance model for groups (Poisson GLM for N) lambda <- exp(beta0 + beta1*habitat) # Density of groups per "square" N <- rpois(nsites, lambda) # site-specific number of groups N.true <- N # for point: inside of B # Simulate observation model data <- groupsize <- NULL for(i in 1:nsites){ if(N[i]==0){ data <- rbind(data,c(i,NA,NA,NA,NA,NA)) # save site, y=1, u, v, d next } if(type=="line"){ # Simulation of distances, uniformly, for each individual in the population d <- runif(N[i], 0, B) gs <- rpois(N[i],lambda.group) +1 # Observable group sizes >= 1 groupsize <-c(groupsize,gs) sigma.vec <- exp(alpha0 + alpha1*(gs-1)) # Subtract 1 for interpretation # Detection probability for each group p <- exp(-d*d/(2*(sigma.vec^2))) # Determine if individuals are captured or not y <- rbinom(N[i], 1, p) u1 <- u2 <- rep(NA,N[i]) # Subset to "captured" individuals only d <- d[y==1] ; u1 <- u1[y==1] ; u2 <- u2[y==1] ; gs <- gs[y==1] ; y <- y[y==1] } if(type=="point"){ # Simulation of data on a circle of radius B (algorithm of Wallin) angle <- runif(N[i], 0, 2*pi) r2 <- runif(N[i], 0, 1) r <- B*sqrt(r2) u1 <- r*cos(angle) + B u2 <- r*sin(angle) + B d <- sqrt((u1 - B)^2 + (u2-B)^2) ## d == r ! This block is all cruft N.true[i] <- sum(d<= B) # Population size inside of count circle, should be N[i] here. gs <- rpois(N[i], lambda.group) + 1 groupsize <-c(groupsize,gs) sigma.vec <- exp(alpha0 + alpha1*(gs-1)) # For counting individuals on a circle so we truncate p here ## cruft p <- ifelse(d<(B), 1, 0)*exp(-d*d/(2*(sigma.vec^2))) y <- rbinom(N[i], 1, p) # Subset to "captured" individuals only d <- d[y==1] ; u1 <- u1[y==1] ; u2 <- u2[y==1] ; gs <- gs[y==1] ; y <- y[y==1] } # Now compile things into a matrix and insert NA if no individuals were # captured at site i. Coordinates (u,v) are preserved. if(sum(y) > 0) { data <- rbind(data,cbind(rep(i, sum(y)), y, u1, u2, d, gs)) } else { data <- rbind(data,c(i,NA,NA,NA,NA,NA)) # make a row of missing data } } colnames(data)[1] <- "site" # Subset to sites at which individuals were captured. You may or may not # do this depending on how the model is formulated so be careful. 
if(discard0) data <- data[!is.na(data[,2]),] # Visualization if(show.plot) { if(type=="line"){ # For line transect op <- par(mfrow = c(1, 3)) ; on.exit(par(op)) tryPlot <- try( { hist(data[,"d"], col = "lightblue", breaks = 20, main = "Frequency of distances to groups", xlab = "Distance") ttt <- table(data[,1]) n <- rep(0, nsites) n[as.numeric(rownames(ttt))] <- ttt plot(habitat, n, main = "Observed group counts (n) vs. habitat", frame = FALSE) plot(table(data[,"gs"]), main = "Observed group sizes", ylab = "Frequency", frame = FALSE) }, silent = TRUE) if(inherits(tryPlot, "try-error")) tryPlotError(tryPlot) } if(type=="point"){ # For point transect op <- par(mfrow = c(2,2)) ; on.exit(par(op)) tryPlot <- try( { plot(data[,"u1"], data[,"u2"], pch = 16, main = "Located groups in point transects", xlim = c(0, 2*B), ylim = c(0, 2*B), col = data[,1], asp = 1) points(B, B, pch = "+", cex = 3) plotrix::draw.circle(B, B, B) hist(data[,"d"], col = "lightblue", breaks = 20, main = "Frequency of distances to groups", xlab = "Distance") ttt <- table(data[,1]) n <- rep(0, nsites) n[as.numeric(rownames(ttt))] <- ttt plot(habitat, n, main = "Observed group counts (n) vs. habitat", frame = FALSE) plot(table(data[,"gs"]), main = "Observed group sizes", ylab = "Frequency", frame = FALSE) }, silent = TRUE) if(inherits(tryPlot, "try-error")) tryPlotError(tryPlot) } } # Output list(type = type, nsites = nsites, lambda.group = lambda.group, alpha0 = alpha0, alpha1 = alpha1, beta0 = beta0, beta1 = beta1, B = B, data=data, habitat=habitat, N = N, N.true = N.true, groupsize=groupsize) }
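# Illustrative use (a minimal sketch with arbitrary values):
if(FALSE) {
  tmp <- simHDSg(type = "line", nsites = 100, lambda.group = 0.75,
                 alpha0 = 0, alpha1 = 0.5, beta0 = 1, beta1 = 0.5, B = 4,
                 show.plot = FALSE)
  head(tmp$data)        # site, y, u1, u2, d and group size gs
  table(tmp$groupsize)  # distribution of simulated group sizes
}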
# ---------------- End of file: AHMbook/R/simHDSg_AHM1_9-2-1_Simulate_hierarch_distance_sampling_groups.R ----------------
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM) # Marc Kery & Andy Royle, Academic Press, 2016. # simHDSopen - AHM1 section 9.5.4.1 p499 # Function to generate open hierarchical distance sampling data # (introduced in AHM1 Section 9.5.4.1) simHDSopen <- function(type=c("line", "point"), nsites = 100, mean.lam = 2, beta.lam = 0, mean.sig = 1, beta.sig = 0, B = 3, discard0=TRUE, nreps=2, phi=0.7, nyears=5, beta.trend = 0){ # Function simulates hierarchical distance sampling data under either a # line (type = "line") or a point (type = "point") transect protocol. # Simulates a LIST OF LISTS of individual observations. # element [[i]][[j]] is the individual observations for replicate j of year i # Function arguments: # nsites: Number of sites (spatial replication) # alpha.lam (= log(mean.lambda)), beta.lam: intercept and # slope of log-linear regression of expected lambda # on a habitat covariate # alpha.sig (= log(mean.sigma)), beta.sig: intercept and # slope of log-linear regression of scale parameter of # half-normal detection function on wind speed # B: strip half width # # more things here # # Note: for "point" the realized density is #[(area of circle) /(area of square)]*lambda # Checks and fixes for input data ----------------------------- nsites <- round(nsites[1]) stopifNegative(mean.lam, allowZero=FALSE) stopifNegative(B, allowZero=FALSE) # -------------------------------------------- type <- match.arg(type) parmvec <- c(mean.lam, beta.lam, mean.sig, beta.sig, phi, beta.trend) names(parmvec)<- c("mean.lam", "beta.lam", "mean.sig", "beta.sig", "phi", "beta.trend") # Make a covariate habitat <- rnorm(nsites) # habitat covariate # covariate "wind" is replicate level covariate # Simulate abundance model (Poisson GLM for M) M <-lambda <- matrix(NA, nrow=nsites, ncol=nyears) Na <- wind <- array(NA,dim=c(nsites,nreps,nyears)) Na.real <- array(0, dim =c(nsites,nreps,nyears)) for(i in 1:nyears){ lambda[,i] <- exp(log(mean.lam) + beta.lam*habitat + beta.trend*(i-nyears/2) ) # density per "square" M[,i] <- rpois(nsites, lambda[,i]) # site-specific abundances Na[,,i] <- matrix(rbinom(nsites*nreps, M[,i],phi), nrow=nsites, byrow=FALSE) wind[,,i] <- runif(nsites*nreps, -2, 2) # Wind covariate } # Detection probability model (site specific) # this is now a 3-d array sigma <- exp(log(mean.sig) + beta.sig*wind) # Simulate observation model ## http://www.anderswallin.net/2009/05/uniform-random-points-in-a-circle-using-polar-coordinates/ outlist <-list() for(yr in 1:nyears){ list.yr <-list() for(rep in 1:nreps){ data <- NULL for(i in 1:nsites){ if(Na[i,rep,yr]==0){ data <- rbind(data, c(i,NA,NA,NA,NA)) # save site, y=1, u1, u2, d next } if(type=="line"){ # Simulation of distances, uniformly, for each ind. in pop. 
# note it piles up all M[i] guys on one side of the transect d <- runif(Na[i,rep,yr], 0, B) Na.real[i,rep,yr]<- sum(d<=B) p <- exp(-d *d / (2 * (sigma[i,rep,yr]^2))) # Determine if individuals are captured or not y <- rbinom(Na[i,rep,yr], 1, p) u1 <- u2 <- rep(NA, Na[i,rep,yr]) # coordinates (u,v) # Subset to "captured" individuals only d <- d[y==1] u1 <- u1[y==1] u2 <- u2[y==1] y <- y[y==1] } if(type=="point"){ angle <- runif(Na[i,rep,yr], 0, 2*pi) dd<- B*sqrt(runif(Na[i,rep,yr],0,1)) u1<- dd*cos(angle) + (B) u2<- dd*sin(angle) + (B) d <- sqrt((u1-B)^2 + (u2-B)^2) Na.real[i,rep,yr]<- sum(d<= B) p <- exp(-d *d / (2 * (sigma[i,rep,yr]^2))) # But we can only count individuals on a circle so we truncate p here pp <- ifelse(d < B, 1, 0) * p y <- rbinom(Na[i,rep,yr], 1, pp) # Det./non-detection of each individual # Subset to "captured" individuals only u1 <- u1[y==1] u2 <- u2[y==1] d <- d[y==1] y <- y[y==1] } # Compile things into a matrix and insert NA if no individuals were # captured at site i. Coordinates (u,v) are not used here. if(sum(y) > 0) { data <- rbind(data, cbind(rep(i, sum(y)), y, u1, u2, d)) } else { data <- rbind(data, c(i,NA,NA,NA,NA)) # make a row of missing data } } # end for(sites) colnames(data) <- c("site", "y", "u1", "u2", "d") # name 1st col "site" if(discard0) data <- data[!is.na(data[,2]),] list.yr[[rep]]<- data } # end for(rep) outlist[[yr]]<- list.yr } # end for(year) # Subset to sites at which individuals were captured. You may or may not # want to do this depending on how the model is formulated so be careful. list(data=outlist, B=B, nsites=nsites, habitat=habitat, wind=wind, M.true= M, K=nreps,nyears=nyears,Na=Na, Na.real=Na.real, mean.lam=mean.lam, beta.lam=beta.lam, mean.sig=mean.sig, beta.sig=beta.sig, phi=phi, beta.trend=beta.trend, parms=parmvec ) }
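# Illustrative use (a minimal sketch with arbitrary values):
if(FALSE) {
  tmp <- simHDSopen(type = "line", nsites = 100, mean.lam = 2, phi = 0.7,
                    nreps = 2, nyears = 5, beta.trend = 0.1)
  str(tmp$M.true)            # true superpopulation size, sites x years
  head(tmp$data[[1]][[1]])   # observations for year 1, replicate 1
}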
# ---------------- End of file: AHMbook/R/simHDSopen.R ----------------
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM) # Marc Kery & Andy Royle, Academic Press, 2016. # simHDSpoint # This is a re-factored and more efficient version of the 'point' type for 'simHDS'. # Note that some of the arguments have changed. # Function to simulate data under hierarchical distance sampling protocol point transects simHDSpoint <- function(nsites = 1000, mean.density = 1, beta.density = 1, mean.sigma = 20, beta.sigma = -5, B = 60, discard0=FALSE, show.plots=TRUE){ # # Function simulates hierarchical distance sampling (HDS) data under # a point transect protocol. # Function arguments: # nsites: Number of sites (spatial replication) # mean.density: expected DENSITY, number of individuals per HECTARE # beta.density: coefficient of log(expected density) on habitat covariate # mean.sigma: scale parameter sigma in the half-normal detection function in METERS # beta.sigma: slope of log-linear regression of scale parameter on wind speed # B: maximum distance from the point, in METERS. # discard0: if TRUE, sites with no detections will be removed from the output. # show.plots: if TRUE, plots of the output are displayed # Checks and fixes for input data ----------------------------- nsites <- round(nsites[1]) stopifNegative(mean.density, allowZero=FALSE) stopifNegative(mean.sigma, allowZero=FALSE) stopifNegative(B, allowZero=FALSE) # -------------------------------------------- # Get covariates habitat <- rnorm(nsites) # habitat covariate wind <- runif(nsites, -2, 2) # wind covariate # Simulate abundance model (Poisson GLM for N) lambda <- exp(log(mean.density) + beta.density*habitat) * base::pi * B^2 / 1e4 # expected number in circle N <- rpois(nsites, lambda) # site-specific abundances # Detection probability model (site specific) sigma <- exp(log(mean.sigma) + beta.sigma*wind) # Simulate observation model dataList <- Nlist <- vector(mode = "list", length = nsites) counts <- numeric(nsites) for(i in 1:nsites){ if(N[i]==0){ dataList[[i]] <- c(site=i, d=NA) next } # Simulation of distance from point given uniform distribution on the circle (algorithm of Wallin) d <- sqrt(runif(N[i], 0, 1)) * B Nlist[[i]] <- d # Detection process, half-normal detection function p <- exp(-d^2 / (2 * (sigma[i]^2))) # Detection probability .. det <- rbinom(N[i], 1, p) # Det./non-detection of each individual counts[i] <- sum(det) # Subset to "captured" individuals only d <- d[det==1] if(length(d) == 0) d <- NA # Plug the d's into the list together with site IDs. dataList[[i]] <- cbind(site=i, d=d) } # convert dataList to a single matrix data <- do.call(rbind, dataList) # Subset to sites at which individuals were captured. You rarely # want to do this depending on how the model is formulated so be careful. if(discard0) data <- data[!is.na(data[,2]),] # Visualisation if(show.plots) { op <- par(mfrow = c(2,2)) ; on.exit(par(op)) tryPlot <- try( { angle <- runif(nrow(data), 0, 2*base::pi) x <- data[, 'd'] * cos(angle) y <- data[, 'd'] * sin(angle) plot(x, y, pch = 16, main = "Locations of detected individuals", xlim = c(-B, B), ylim = c(-B, B), col = data[,1], asp = 1) points(0, 0, pch = "+", cex = 3, col = "black") plotrix::draw.circle(0, 0, B) tmp <- hist(unlist(Nlist), breaks = 20, xlim=c(0, B), xlab = "Distance", main = "Frequency of distances\nblue: detected, gray: undetected") hist(data[,"d"], breaks = tmp$breaks, col = "lightblue", add=TRUE) plot(habitat, counts, main = "Observed counts (n) vs. habitat") plot(wind, counts, main = "Observed counts (n) vs. 
wind speed") }, silent = TRUE) if(inherits(tryPlot, "try-error")) tryPlotError(tryPlot) } # Output list( # arguments input nsites = nsites, mean.density = mean.density, beta.density = beta.density, mean.sigma = mean.sigma, beta.sigma = beta.sigma, B = B, # generated values data = data, counts = counts, habitat = habitat, wind = wind, N = N ) }
# ---------------- End of file: AHMbook/R/simHDSpoint.R ----------------
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM) # Marc Kery & Andy Royle, Academic Press, 2016. # simHDStr - AHM1 section 9.3.2 p474 simHDStr <- function(type = c("line", "point"), method=c("removal", "double"), nsites=200, lambda.group = 1, alpha0 = 0, alpha1 = 0, beta0 = 1, beta1 = 0.5, p.avail = 0.75, K = 3, p.double = c(0.4, 0.6), B = 3, discard0=FALSE, show.plot=TRUE){ # A general function for simulating hierarchical distance sampling (HDS) data # combined with a time-removal (with 3 removal periods) or # double-observer protocol, either for a line (type = "line") or # a point (type = "point") transect protocol # and with method = "removal" or method = "double". # # Other function arguments: # nsites: Number of sites (spatial replication) # lambda.group: Poisson mean of group size # alpha0, alpha1: intercept and slope of log-linear model relating sigma of # half-normal detection function to group size # beta0, beta1: intercept and slope of log-linear model relating the Poisson # mean of the number of groups per unit area to habitat # p.avail: overall availability probability (phi in text) #int.avail: time interval-specific availability probability # K: number of removal periods (of equal length) # p.double: detection probability for first and second observer # B: strip half width # discard0: whether to discard or keep the data from sites with nobody detected # Checks and fixes for input data ----------------------------- nsites <- round(nsites[1]) stopifNegative(lambda.group, allowZero=FALSE) stopifnotProbability(p.avail) K <- round(K)[1] stopifnotProbability(p.double) stopifNegative(B, allowZero=FALSE) # -------------------------------------------- type <- match.arg(type) method <- match.arg(method) # Create covariate habitat <- rnorm(nsites) # Simulated continuous covariate # Simulate superpopulation abundance model for groups (Poisson GLM for M) lambda <- exp(beta0 + beta1*habitat) # Density of groups per "square" M <- rpois(nsites, lambda) # site-specific number of groups M.true <- M # for point: inside of B data <- NULL for(i in 1:nsites){ if(M[i]==0){ data <- rbind(data,c(i,NA,NA,NA,NA,NA,NA)) # save site, y=1, u1, u2, d, gs, tint next } # Simulation for line transect data if(type=="line"){ # Simulation of distances, uniformly, for each individual in the population # note it piles up all N[i] guys on one side of the transect d <- runif(M[i], 0, B) gs <- rpois(M[i], lambda.group) + 1 # Observable group size sigma.vec<- exp(alpha0 + alpha1*(gs-1) ) # subtract 1 for interpretation # Detection probability for each group p <- exp(-d*d/(2*(sigma.vec^2))) # Time-removal protocol if(method=="removal"){ int.avail <- 1 - (1-p.avail)^(1/K) rem.probs <- c(int.avail, ((1-int.avail)^(1:(K-1)))*int.avail) mn.probs <- c(rem.probs, 1-sum(rem.probs)) # was this (think wrong): aux <- sample(1:(K+1), N[i], replace=TRUE, prob=mn.probs) aux <- sample(1:(K+1), M[i], replace=TRUE, prob=mn.probs) aux[aux==(K+1)] <- 0 } # Double-observer protocol if(method=="double"){ rem.probs <- c(p.double[1]*(1-p.double[2]), (1-p.double[1]) * p.double[2], p.double[1]*p.double[2]) mn.probs <- c(rem.probs, 1-sum(rem.probs)) aux <- sample(1:4, M[i], replace=TRUE, prob=mn.probs) aux[aux==4]<- 0 } newp <- p * as.numeric(aux!=0) navail <- sum(aux!=0) if(navail==0){ data <- rbind(data,c(i,NA,NA,NA,NA,NA,NA)) # save site, y=1, u1, u2, d next } # generate count of birds based on combined probability of detection y <- rbinom(M[i], 1, newp) # Subset to "captured" individuals only u1 <- u2 <- rep(NA, 
sum(y)) ; d <- d[y==1] ; gs <- gs[y==1] aux <- aux[y==1] ; y <- y[ y==1] } # Simulation for point transect data if(type=="point"){ # Simulation of data on a circle of radius B angle <- runif(M[i], 0, 2*pi) r2 <- runif(M[i], 0, 1) r<- B*sqrt(r2) u1<- r*cos(angle) + B u2<- r*sin(angle) + B d <- sqrt((u1 - B)^2 + (u2-B)^2) ## d == r ! Cruft... M.true[i] <- sum(d<= B) # Population size inside of count circle gs <- rpois(M[i], lambda.group) + 1 sigma.vec <- exp(alpha0 + alpha1*(gs-1)) # But we can only count individuals on a circle so we truncate p here p <- ifelse(d<(B),1,0)*exp(-d*d/(2*(sigma.vec^2))) # Time-removal protocol if(method=="removal"){ int.avail <- 1 - (1-p.avail)^(1/K) rem.probs <- c(int.avail, ((1-int.avail)^(1:(K-1)))*int.avail) mn.probs <- c(rem.probs, 1-sum(rem.probs)) aux <- sample(1:(K+1), M[i], replace=TRUE, prob=mn.probs) aux[aux==(K+1)] <- 0 } # Double-observer protocol if(method=="double"){ rem.probs <- c(p.double[1]*(1-p.double[2]), (1-p.double[1])*p.double[2], p.double[1]*p.double[2]) mn.probs <- c(rem.probs, 1-sum(rem.probs)) aux <- sample(1:(K+1), M[i], replace=TRUE, prob=mn.probs) aux[aux==(K+1)]<- 0 } newp <- p * as.numeric(aux!=0) navail <- sum(aux!=0) if(navail==0){ data <- rbind(data,c(i,NA,NA,NA,NA,NA,NA)) # save site, y=1, u1, u2, d next } # generate count of birds based on combined probability of detection y <- rbinom(M[i], 1, newp) # Subset to "captured" individuals only u1 <- u1[y==1] ; u2 <- u2[y==1] ; d <- d[y==1] ; gs <- gs[ y==1] aux <- aux[y==1] ; y <- y[ y==1] } # Now compile things into a matrix and insert NA if no individuals were # captured at site i. Coordinates (u,v) are not used here. if(sum(y)>0){ data <- rbind(data, cbind(rep(i, sum(y)), y, u1, u2, d, gs, aux)) } else { data <- rbind(data, c(i,NA,NA,NA,NA,NA,NA)) # make a row of missing data } } # end of for loop colnames(data)[1] <- "site" # Subset to sites at which individuals were captured. You may or may not # do this depending on how the model is formulated so be careful. if(discard0) data <- data[!is.na(data[,2]),] parmvec <- c(alpha0, alpha1, beta0, beta1, p.avail, p.double) names(parmvec) <- c("alpha0", "alpha1", "beta0", "beta1", "p.avail", "p.double1", "p.double2") # Visualisation if(show.plot) { if(type=="line"){ # For line transect op <- par(mfrow = c(1, 3)) ; on.exit(par(op)) tryPlot <- try( { hist(data[,"d"], col = "lightblue", breaks = 20, main = "Frequency of distances to groups", xlab = "Distance") ttt <- table(data[,1]) n <- rep(0, nsites) n[as.numeric(rownames(ttt))] <- ttt plot(habitat, n, main = "Observed group counts (n) vs. habitat", frame = FALSE) plot(table(data[,"gs"]), main = "Observed group sizes", ylab = "Frequency", frame = FALSE) }, silent = TRUE) if(inherits(tryPlot, "try-error")) tryPlotError(tryPlot) } if(type=="point"){ # For point transect op <- par(mfrow = c(2,2)) ; on.exit(par(op)) tryPlot <- try( { plot(data[,"u1"], data[,"u2"], pch = 16, main = "Located groups in point transects", xlim = c(0, 2*B), ylim = c(0, 2*B), col = data[,1], asp = 1) points(B, B, pch = "+", cex = 3) # library(plotrix) draw.circle(B, B, B) hist(data[,"d"], col = "lightblue", breaks = 20, main = "Frequency of distances to groups", xlab = "Distance") ttt <- table(data[,1]) n <- rep(0, nsites) n[as.numeric(rownames(ttt))] <- ttt plot(habitat, n, main = "Observed group counts (n) vs. 
habitat", frame = FALSE) plot(table(data[,"gs"]), main = "Observed group sizes", ylab = "Frequency", frame = FALSE) }, silent = TRUE) if(inherits(tryPlot, "try-error")) tryPlotError(tryPlot) } } # Output list(type = type, method = method, nsites = nsites, lambda.group = lambda.group, alpha0 = alpha0, alpha1 = alpha1, beta0 = beta0, beta1 = beta1, p.avail = p.avail, p.double = p.double, K = K, B = B, data=data, habitat=habitat, M = M, M.true = M.true, parms = parmvec) }
# ---------------- End of file: AHMbook/R/simHDStr.R ----------------
# Code from Marc's Word file "new simHDS function with availability.docx" # See emails 2022-02-16 ff. # 1 A new data simulation function for HDS + PC data # We now package into a function code written by Ken. All distance units will be thought of as being in metres # Changes by Mike: # Change function name to 'simIDS'. # Use new 'simHDSpoint' function. # Use density instead of abundance, change mean.lambda to mean.density, beta.lam to beta.density. # Reorder arguments by data type (DS, PC, OC); use upper case HDS, PC, OC for all arguments. # Skip PC or OC data generation if nsites_PC or nsites_OC = 0 # ------------ Start of function definition --------------- simIDS <- function(mean.density = 1, beta.density = 1, mean.phi = 0.14, beta.phi = 0, # for distance sampling nsites_HDS = 1000, sigHDS = 100, maxDist_HDS = 200, nbins = 4, range.dur.HDS = c(5, 5), # for point counts nsites_PC = 10000, sigPC = 70, maxDist_PC = 500, range.dur.PC = c(3, 30), # for occupancy = detection/nondetection nsites_OC = 5000, sigOC = sigPC, maxDist_OC = maxDist_PC, range.dur.OC = range.dur.PC, show.plots = TRUE) { # # Generates HDS data and PC data for an integrated distance sampling # (IDS) model with shared density and availability processes, # but possibly different detection/perceptability process # (i.e., different detection functions). # # All lengths are in meters, densities in animals per hectare. # # Arguments: # Ecological model: # mean.density: expected density, animals per ha. # beta.density: coefficient of expected density on habitat covariate # mean.phi: singing or activity rate phi, so that under the model of # Solymos et al. (2013) availability probability # theta = 1 – exp(- duration * mean.phi). # beta.phi: coefficient of log(singing rate) on some covariate (LATER PERHAPS) # Distance sampling # nsites_HDS: number of sites with HDS protocol # sigHDS: scale parameter sigma in the half-normal detection function # at the HDS sites # nbins: Number of distance bins # maxDist_HDS: Truncation distance of observations in the HDS protocol; # any observations beyond this are discarded. # range.dur.HDS: range of the survey durations in minutes for the HDS # surveys. By default, this is set to c(5, 5) for the HDS data, # to enforce constancy (with 5 min distance sampling) # Point counts # nsites_PC: number of sites with simple point count (PC) protocol # sigPC: scale parameter sigma in the half-normal detection function # at the PC sites # maxDist_PC: Maximum distance from the observer at which animals can be detected. # range.dur.PC: range of the survey durations in minutes for the PC # surveys. Means will draw from uniform distribution on those bounds. # Detection/nondetection # nsites_OC: number of sites with Detection/nondetection (occupancy) protocol # sigOC: scale parameter sigma in the half-normal detection function # at the OC sites # maxDist_OC: Maximum distance from the observer at which animals can be detected. # range.dur.OC: range of the survey durations in minutes for the OC # surveys. Means will draw from uniform distribution on those bounds. 
# Written by Ken Kellner and Marc Kéry, February 2022 # Desecrated by Mike Meredith, on-going # Distance sampling data # ---------------------- ## Simulate a regular distance sampling data set dat1.raw <- simHDSpoint(nsites = nsites_HDS, mean.density = mean.density, beta.density = beta.density, mean.sigma = sigHDS, beta.sigma = 0, B = maxDist_HDS, discard0 = FALSE, show.plots = show.plots) # str(dat1.raw) # Re-format to put into an `unmarkedFrameDS` object: # Convert long-form to y matrix db <- seq(0, maxDist_HDS, length.out=(nbins+1)) # Distance bins, equal widths tmp <- with(dat1.raw, tapply(data[,2], data[,1], function(x) hist(x, breaks=db, plot=FALSE)$counts)) # returns a list y_hds <- do.call(rbind, tmp) # convert list to a matrix # Site covariates sc_hds <- data.frame(habitat=dat1.raw$habitat) # Create unmarked frame for HDS data set: # 'Null data set' has perfect availability umf_hds0 <- unmarkedFrameDS(y=y_hds, siteCovs=sc_hds, survey="point", dist.breaks=db, unitsIn="m") # summary(umf_hds0) # Add availability # Simulate survey durations for HDS data dds <- runif(nsites_HDS, min(range.dur.HDS), max(range.dur.HDS)) # HDS # Linear model for the singing/activity rate phi phi <- mean.phi # possibly later also add covariate ### Simulate availability process on existing datasets # (1) Model of Solymos et al. 2013 # HDS pdds <- 1-exp(-1*dds*phi) # Compute availability prob. umf_hds1 <- umf_hds0 # Copy unmarked data frame tmp <- umf_hds0@y umf_hds1@y <- array(rbinom(length(tmp), tmp, pdds), dim=dim(tmp)) # Point count data # ---------------- if(nsites_PC > 0) { ## Simulate Point Count data set (raw, without availability so far) dat2.raw <- simHDSpoint(nsites = nsites_PC, mean.density = mean.density, beta.density = beta.density, mean.sigma = sigPC, beta.sigma = 0, B = maxDist_PC, discard0 = FALSE, show.plots = show.plots) # Re-format to put in a unmarkedFramePCount object y_pc <- matrix(dat2.raw$counts, ncol=1) # Site covariates sc_pc <- data.frame(habitat=dat2.raw$habitat) # Create unmarked frame for PC data set: # 'Null data set' with perfect availability umf_pc0 <- unmarkedFramePCount(y=y_pc, siteCovs=sc_pc) # summary(umf_pc0) # Add availability # Simulate survey durations for PC data dpc <- runif(nsites_PC, min(range.dur.PC), max(range.dur.PC)) # Linear model for the singing/activity rate phi phi <- mean.phi # possibly later also add covariate # Simulate availability process on existing datasets pdpc <- 1-exp(-1*dpc*phi) # Compute availability prob. 
umf_pc1 <- umf_pc0 # Copy unmarked data frame # tmp <- umf_pc0@y # umf_pc1@y <- array(rbinom(length(tmp), tmp, pdpc), dim=dim(tmp)) umf_pc1@y <- matrix(rbinom(nsites_PC, umf_pc0@y, pdpc), ncol=1) } # Detection/nondetection data # --------------------------- if(nsites_OC > 0) { ## Simulate occupancy data set (raw, without availability so far) dat3.raw <- simHDSpoint(nsites = nsites_OC, mean.density = mean.density, beta.density = beta.density, mean.sigma = sigOC, beta.sigma = 0, B = maxDist_OC, discard0 = FALSE, show.plots = show.plots) # Re-format to put in a unmarkedFramePCount object tmp <- dat3.raw$counts > 0 y_oc <- matrix(as.numeric(tmp), ncol=1) # Site covariates sc_oc <- data.frame(habitat=dat3.raw$habitat) # Create unmarked frame for OC data set: # 'Null data set' with perfect availability umf_oc0 <- unmarkedFrameOccu(y=y_oc, siteCovs=sc_oc) # summary(umf_oc0) # Add availability # Simulate survey durations for OC data doc <- runif(nsites_OC, min(range.dur.OC), max(range.dur.OC)) # Linear model for the singing/activity rate phi phi <- mean.phi # possibly later also add covariate pdoc <- 1-exp(-1*doc*phi) # Compute availability prob. umf_oc1 <- umf_oc0 # Copy unmarked data frame n_ind_avail <- rbinom(nsites_OC, dat3.raw$counts, pdoc) # y_oc2 <- matrix(as.numeric(n_ind_avail > 0), ncol=1 umf_oc1@y <- matrix(as.numeric(n_ind_avail > 0), ncol=1) } # Make plot of availability model if(show.plots) { dur <- 0:max(range.dur.PC, range.dur.OC) par(mfrow = c(1,1), mar = c(5,5,4,3), cex.lab = 1.5, cex.axis = 1.5, cex.main = 1.5) plot(dur, 1-exp(-1*dur*mean.phi), xlab = 'Survey duration (min)', ylab = 'Availability probability', main = paste('Parametric function for availability,\nwith phi =', round(mean.phi,2)), type = 'l', frame = FALSE, ylim = c(0, 1), lwd = 5, col = 'grey') } # Numerical output out <- list( # input arguments mean.density = mean.density, beta.density = beta.density, mean.phi = mean.phi, beta.phi = beta.phi, nsites_HDS = nsites_HDS, sigHDS = sigHDS, nbins = nbins, maxDist_HDS = maxDist_HDS, range.dur.HDS = range.dur.HDS, nsites_PC = nsites_PC, sigPC = sigPC, maxDist_PC = maxDist_PC, range.dur.PC = range.dur.PC, nsites_OC = nsites_OC, sigOC = sigOC, maxDist_OC = maxDist_OC, range.dur.OC = range.dur.OC, # generated values phi = phi, dds = dds, umf_hds0 = umf_hds0, umf_hds1 = umf_hds1) if(nsites_PC > 0) { out$dpc <- dpc out$umf_pc0 <- umf_pc0 out$umf_pc1 <- umf_pc1 } if(nsites_OC > 0) { out$doc <- doc out$umf_oc0 <- umf_oc0 out$umf_oc1 <- umf_oc1 } return(out) } # ------------ End of function definition ---------------
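# Illustrative use: a minimal sketch with arbitrary values; requires the
# 'unmarked' package, which supplies the unmarkedFrame* constructors used above.
if(FALSE) {
  tmp <- simIDS(mean.density = 1, beta.density = 1, mean.phi = 0.14,
                nsites_HDS = 500, nsites_PC = 1000, nsites_OC = 500,
                show.plots = FALSE)
  summary(tmp$umf_hds1)   # HDS data after thinning by availability
  summary(tmp$umf_pc1)    # point-count data after thinning by availability
}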
# ---------------- End of file: AHMbook/R/simIDS.R ----------------
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM) # Marc Kery & Andy Royle, Academic Press, 2016. # issj.sim - AHM1 section 9.7.1 p517 # Function to simulate open distance sampling data for the Island Scrub Jays # (introduced in AHM1 Section 9.7.1) issj.sim <- function(B, db, lam, sigma, phi, gamma, npoints, nyrs, nbsize=-1.02){ stopifnot(min(db) == 0 && max(db) == B) lam <- as.vector(lam) sigma <- as.vector(sigma) stopifnot(length(lam) == length(sigma)) nsites <- length(lam) # number of grid cells stopifnot(phi >= 0 && phi <= 1) stopifnot(gamma >= 0) stopifnot(npoints <= nsites) nD <- length(db) - 1 # Number of distance classes Nsim <- matrix(0, nrow=nsites, ncol=nyrs) # simulated number of birds by site and year #yr 1 as before Nsim[,1]<-rnbinom (n=nsites, size=exp(nbsize), mu=lam) #generate individual counts per grid cell/point count circle for(y in 2:nyrs){ Nsim[,y]<-rbinom(nsites, Nsim[, y-1], phi) + rpois(nsites, Nsim[, y-1]*gamma) } #generate distance from hypothetical point count locations # first, set prob for an individual to be in a 1m distance class from the center point rc <- 1:B ri <- (0:(B-1)) ar <- pi * (rc^2 - ri^2) pcc <- ar/sum(ar) NcList <- vector("list", nyrs) for (y in 1:nyrs){ NcList[[y]] <- matrix(nrow=0, ncol=2) for (j in 1:nsites){ # This is for all sites if (Nsim[j,y]==0) next junk <- rmultinom(1, Nsim[j,y], pcc) # count of birds in each 1m band tt <-rep( (which(junk!=0) - 0.5), (junk[which(junk!=0)]) ) # distance from centre Ndist <- cbind(rep(j,Nsim[j,y]), tt ) NcList[[y]]<-rbind(NcList[[y]], Ndist) } } # for each sampling point generate detection data based on distance of individuals within a max of 300m # and the detection model from the paper cell<-sort(sample(1:nsites, npoints, replace=FALSE)) detList <- vector("list", nyrs) for (y in 1:nyrs){ for (j in cell) { dvec <- NcList[[y]][NcList[[y]][,1]==j, 2] if(length(dvec)==0) { det <- rep(0, nD) } else { pvec <- exp(-dvec^2/(2*(sigma[j]^2))) dets <- dvec[rbinom(length(dvec),1,pvec )==1] det <- table(cut(dets, db, include.lowest=TRUE)) } detList[[y]]<-rbind(detList[[y]],det) } } # **npoints** x nyears matrix of total detections y<-sapply(detList, rowSums) # Pool all of the detection data into long vectors of distance category and site across all years dclass<-site<-NULL for (t in 1:nyrs){ for (s in 1:npoints){ if (y[s,t]==0) next ssi<-rep(cell[s], y[s,t]) dc<-NULL for (k in 1:nD){ if(detList[[t]][s,k]==0) next dd<-rep(k, detList[[t]][s,k]) dc<-c(dc, dd) } dclass<-c(dclass, dc) site<-c(site, ssi) } } return(list(NcList=NcList, detList=detList, N=Nsim, cell=cell, y=y, dclass=dclass, site=site, nsites=nsites, lam=lam, phi=phi, gamma=gamma, sigma=sigma)) }
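# Illustrative use: a minimal sketch in which lam and sigma are hypothetical
# per-cell expected abundances and detection scales:
if(FALSE) {
  nsites <- 100
  tmp <- issj.sim(B = 300, db = seq(0, 300, by = 50), lam = rep(2, nsites),
                  sigma = rep(100, nsites), phi = 0.6, gamma = 0.35,
                  npoints = 40, nyrs = 4)
  str(tmp$y)       # detections per point and year
  head(tmp$dclass) # distance class of each detected individual
}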
# ------------ End of file AHMbook/R/simIssj.sim.R ------------
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM)
# Marc Kery & Andy Royle, Academic Press, 2016.

# sim.ldata - AHM1 section 8.2.3 p402

# Function to simulate non-hierarchical line transect data
# (introduced in AHM1 Section 8.2.3)

sim.ldata <- function(N = 200, sigma = 30, show.plot = TRUE){
  # Function to simulate line transect data under CDS.
  # Function arguments:
  #   N: number of individuals along transect with distance u(-100, 100)
  #   sigma: scale parameter of half-normal detection function
  # Function subjects N individuals to sampling, and then retains the value
  # of x = distance only for individuals that are captured.

  if(FALSE) x <- NULL # fix issues with curve

  # Checks and fixes for input data -----------------------------
  N <- round(N[1])
  stopifNegative(sigma, allowZero=FALSE)
  # --------------------------------------------

  xall <- runif(N, -100, 100)   # Distances of all N individuals
  g <- function(x, sig) exp(-x^2/(2*sig^2))
  p <- g(xall, sig=sigma)       # detection probability
  y <- rbinom(N, 1, p)          # some inds. are detected and their distance measured
  x <- xall[y==1]               # this has direction (right or left transect side)
  x <- abs(x)                   # now it doesn't have direction

  if(show.plot) {
    op <- par(mfrow = c(1,2)) ; on.exit(par(op))
    # Plot the detection function
    tryPlot <- try( {
      curve(exp(-x^2/(2*sigma^2)), 0, 100, xlab="Distance (x)",
          ylab="Detection prob.", lwd = 2, main = "Detection function", ylim = c(0,1))
      text(80, 0.9, paste("sigma:", sigma))
      hist(abs(xall), nclass=10, xlab = "Distance (x)", col = "grey",
          main = "True (grey) \nand observed distances (blue)")
      hist(x, col = "blue", add = TRUE)
    }, silent = TRUE)
    if(inherits(tryPlot, "try-error"))
      tryPlotError(tryPlot)
  }
  return(list(N = N, sigma = sigma, xall = xall, x = x))
}
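# A hedged usage sketch (not from the book; values are illustrative):
# x contains the (absolute) distances of the detected individuals only.
# set.seed(1)
# dat <- sim.ldata(N = 200, sigma = 30, show.plot = FALSE)
# length(dat$x) / dat$N   # realised overall detection probability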
# ------------ End of file AHMbook/R/simLdata_AHM1_8-2-3_Simulate_line_transect_data.R ------------
# AHM2 section 2.7.1 Simulate data for a multinomial-mixture model
# Called data.fn in the draft
# with R=nsites, J=nsurveys, K=nyears

simMultMix <- function(nsites = 100, nsurveys = 3, nyears = 4,
    lambda = 3, theta = 0.5, p = 0.3){
  # Simulate data using the multinomial-Poisson model with a
  # repeated constant-interval removal design (written by R.B. Chandler)
  # nsites : number of sites
  # nyears : number of primary periods
  # nsurveys : number of secondary periods (removal passes)
  # lambda, theta and p: expected abundance, availability and detection prob.

  # Checks and fixes for input data -----------------------------
  nsites <- round(nsites[1])
  nsurveys <- round(nsurveys[1])
  nyears <- round(nyears[1])
  stopifNegative(lambda, allowZero=FALSE)
  stopifnotProbability(theta)
  stopifnotProbability(p)
  # --------------------------------------------

  y <- array(NA, c(nsites, nyears, nsurveys))
  M <- rpois(nsites, lambda)        # Local population size
  N <- matrix(NA, nsites, nyears)   # Individuals available for detection
  for(i in 1:nsites) {
    N[i,] <- rbinom(nyears, M[i], theta)
    Nleft <- N[i,]                  # Individuals not yet removed
    for(j in 1:nsurveys) {          # Generalised removal loop (any nsurveys)
      y[i,,j] <- rbinom(nyears, Nleft, p)  # Observe some ...
      Nleft <- Nleft - y[i,,j]             # ... and remove them
    }
  }
  # Sites x (years*surveys) matrix, surveys within years (works for any nyears)
  y2d <- do.call(cbind, lapply(1:nyears, function(k) y[,k,]))
  return(list(
    # ------ input arguments ------
    nsites = nsites, nsurveys = nsurveys, nyears = nyears,
    lambda = lambda, theta = theta, p = p,
    # ------ generated values ------
    M = M,        # local population size
    N = N,        # animals available for detection
    y = y,        # sites x years x surveys array of observations
    y2d = y2d))   # y in 2d format, sites x (years*surveys) matrix
}
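# A hedged usage sketch (not from the book; values are illustrative):
# set.seed(1)
# dat <- simMultMix(nsites = 100, nsurveys = 3, nyears = 4,
#                   lambda = 3, theta = 0.5, p = 0.3)
# dim(dat$y)    # 100 x 4 x 3: sites x years x removal passes
# dim(dat$y2d)  # 100 x 12: removal passes within years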
# ------------ End of file AHMbook/R/simMultMix_AHM2_2.7.1.R ------------
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM) # Marc Kery & Andy Royle, Academic Press, 2016. # From Marc, 13 June 2019 # "Build up the BerneseOberland data set and the simNmixSp functions.docx" # Originally simNmixSpatial. # ------ Define function simNmixSpatial -------- simNmixSpatial <- function(nsurveys = 3, mean.lambda = exp(2), beta = c(2, -2), mean.p = 0.5, alpha = c(-1, -1), sample.size = 500, variance.RF = 1, theta.RF = 10, seeds = c(10, 100), truncN = 6, show.plots = TRUE, verbose = TRUE) { # Simulates replicated counts under a spatial, static binomial N-mixture model for a semi-realistic landscape in a square of 50x50 km in the BernesOberland around Interlaken, Switzerland. # Unit of the data simulation is a 1km2 quadrat, hence, there are 2500 units. # For abundance, the function allows you to specify a quadratic effect of elevation, the data for which are contained in the data set BerneseOberland, which is part of the AHMbook package and is a subset of the data set 'Switzerland' in R package unmarked. # Then, a Gaussian spatial random field (s) with negative exponential correlation function is simulated using the AHMbook function simExpCorrRF. For that field, you can set the variance and the range scale parameter theta (see helptext for that function for more details). Basically, the larger the value of theta.RF, the bigger are the 'islands' simulated in the random field. # The abundance in each quadrat i is built up via the following linear predictor: # lambda[i] <- exp(beta0 + beta1 * elev[i] + beta2 * elev[i]^2 + s[i]) # N[i] ~ Poisson(lambda[i]) #Replicated counts are simulated as usual under a binomial observation model, and detection probability is allowed to vary by one site and one observational covariate: respectively quadrat forest cover, which is real data in the BerneseOberland data set, and wind-speed, which is invented data. #Counts at each site (i) and for each occasion (j) are then produced according to the following model: # p[i,j] <- plogis(alpha0 + alpha1 * forest[i] + alpha2 * wind[i,j]) # C[i,j] ~ Binomial(N[i], p[i,j]) #Finally, we assume that not each one of the 2500 quadrats is surveyed. Hence, we allow you to choose the number of quadrats that are surveyed and these will then be randomly placed into the landscape. We then assume that counts will only be available for these surveyed quadrats, i.e., counts from all non-surveyed quadrats will be NA'd out. 
# truncN is a graphical parameter that truncates the z axis in some plots BerneseOberland <- NULL # otherwise "no visible binding for global variable 'BerneseOberland'" when checked data(BerneseOberland, envir = environment()) # Simulate spatial random field set.seed(seeds[1]) s <- simExpCorrRF(variance = variance.RF, theta = theta.RF, show.plots=show.plots) # SimulateNmix data with spatially correlated random effect in abundance nsites<- 2500 # Number of sites (corresponding to the 50 by 50 grid) # nsurveys<- nsurveys # Number of replicate observations y <- array(dim = c(nsites, nsurveys)) # Array for counts # Ecological process beta0 <- log(mean.lambda) elev <- standardize(BerneseOberland$elevation) forest<- standardize(BerneseOberland$forest) loglam0 <- beta0 + beta[1] * elev + beta[2] * elev^2 loglam<- loglam0 + c(s$field) lam0 <- exp(loglam0) lam<- exp(loglam) # Determine actual abundances as Poisson rv’s with parameter lam N <- rpois(n = nsites, lambda = lam) sum(N > 0) / nsites # Finite-sample occupancy Ntotal<- sum(N) # Observation process # Detection probability as linear function of forest and wind speed alpha0 <- qlogis(mean.p) wind<- matrix(rnorm(nsites*nsurveys), nrow = nsites, ncol = nsurveys) p <- array(NA, dim = c(nsites, nsurveys)) for(j in 1:nsurveys){ p[,j]<- plogis(alpha0 + alpha[1] * forest + alpha[2] * wind[,j]) } # Go out and count things for (j in 1:nsurveys){ y[,j] <- rbinom(n = nsites, size = N, prob = p[,j]) } # Select a sample of sites for surveys set.seed(seeds[2]) surveyed.sites<- sort(sample(1:nsites, size = sample.size)) # Create the array of observed data by NA'ing out unsurveyed quadrats yobs<- y # Make a copy: the observed data yobs[-surveyed.sites,] <- NA # Minimal console output true <- sum(N) obs <- sum(apply(y, 1, max)) if(verbose) { cat("\n\nTrue total population size:", true) cat("\nTheoretically observed population size (sumMaxC) in 2500 quadrats:",obs) cat(paste("\nObserved population size in", sample.size,"surveyed quadrats:", sum(apply(yobs, 1, max), na.rm = TRUE))) cat("\nUnderestimation of abundance in total of 2500 quadrats:", round(100*(1-obs/true)), "%\n\n") } # Plot stuff if(show.plots){ # Restore graphical settings on exit --------------------------- oldpar <- par(mfrow = c(1,2), mar = c(5,8,5,2), cex.lab = 1.5, "cex.main") oldAsk <- devAskNewPage(ask = dev.interactive(orNone=TRUE)) on.exit({par(oldpar); devAskNewPage(oldAsk)}) # -------------------------------------------------------------- tryPlot <- try( { # Plot lambda as function of covariates (excluding spatial field) plot(BerneseOberland$elevation, lam0, cex = 1, pch = 16, main = "Expected counts (lambda) without spatial field", xlab = "Elevation", ylab = "lambda excl. spatial field", frame = FALSE, col = rgb(0, 0, 0, 0.3)) # Plot lambda as function of covariates (with spatial field) plot(BerneseOberland$elevation, lam, cex = 1, pch = 16, main = "Expected counts (lambda) with effect of spatial field", xlab = "Elevation", ylab = "lambda incl. 
spatial field", frame = FALSE, col = rgb(0, 0, 0, 0.3)) # Plot detection as a function of the two covariates par(mfrow = c(1,2), cex.main = 1.5) plot(wind, p, ylim = c(0,1), cex = 1, main = "Detection (p) ~ Wind speed", frame = FALSE, col = rgb(0,0,0,0.3), pch = 16) noforest<- forest < -1.34 points(wind[noforest,], p[noforest,], col = 'blue', pch = 16, cex = 1) legend('topright', 'blue: sites with no forest') plot(BerneseOberland$forest, apply(p, 1, mean), ylim = c(0,1), cex = 1, main = "Detection (p) ~ Forest cover", frame = FALSE, col = rgb(0,0,0,0.3), pch = 16) # Summary set of plots par(mfrow = c(2, 3), mar = c(2,2,4,6)) r <- raster::rasterFromXYZ(data.frame(x = BerneseOberland$x, y = BerneseOberland$y, z = BerneseOberland$elevation)) raster::plot(r, col = topo.colors(20), axes = FALSE, box = FALSE, main = "Elevation (metres)") r <- raster::rasterFromXYZ(data.frame(x = BerneseOberland$x, y = BerneseOberland$y, z = BerneseOberland$forest)) raster::plot(r, col = topo.colors(20), axes = FALSE, box = FALSE, main = "Forest cover (%)") r <- raster::rasterFromXYZ(data.frame(x = s$gr[,1], y = s$gr[,2], z = c(s$field))) raster::plot(r, col = topo.colors(20), axes = FALSE, box = FALSE, main = "Spatial effect (neg.exp. corr.)") r <- raster::rasterFromXYZ(data.frame(x = BerneseOberland$x, y = BerneseOberland$y, z = N)) raster::plot(r, col = topo.colors(20), axes = FALSE, box = FALSE, main = paste("Abundance (N, truncated at", truncN, ")"), zlim = c(0, truncN)) r <- raster::rasterFromXYZ(data.frame(x = BerneseOberland$x, y = BerneseOberland$y, z = apply(p, 1, mean))) raster::plot(r, col = topo.colors(20), axes = FALSE, box = FALSE, main = "Average detection probability") r <- raster::rasterFromXYZ(data.frame(x = BerneseOberland$x, y = BerneseOberland$y, z = apply(y, 1, max))) raster::plot(r, col = topo.colors(20), axes = FALSE, box = FALSE, main = paste("Max count (truncated at", truncN, ")\n with surveyed quadrats"), zlim = c(0, truncN)) points(BerneseOberland$x[surveyed.sites], BerneseOberland$y[surveyed.sites], pch = 16, col = "red", cex = 0.8) }, silent = TRUE) if(inherits(tryPlot, "try-error")) tryPlotError(tryPlot) } # Output return(list( # ------------------- arguments input ---------------------- nsurveys = nsurveys, mean.lambda = mean.lambda, beta = beta, mean.p = mean.p, alpha0 = alpha0, alpha = alpha, sample.size = sample.size, variance.RF = variance.RF, theta.RF = theta.RF, seeds = seeds, # ------------------- from BerneseOberland ---------------------- xcoord = BerneseOberland$x, ycoord = BerneseOberland$y, elevation = BerneseOberland$elevation, forest = BerneseOberland$forest, elevationS = elev, forestS = forest, # ------------------- generated variables ------------------------ wind = wind, field = s$field, beta0 = beta0, lam = lam, N = N, Ntotal = Ntotal, p = p, y = y, surveyed.sites = surveyed.sites, yobs = yobs)) } # End of function definition
# ------------ End of file AHMbook/R/simNmixSpatial_AHM2_9.R ------------
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM) # Marc Kery & Andy Royle, Academic Press, 2016. # simNmix - AHM1 section 6.5 p241 # Function to simulate data for binomial and multinomial mixture models under wide range of conditions (introduced in AHM1 Section 6.5) simNmix <- function(nsites = 267, nvisits = 3, mean.theta = 1, mean.lam = 2, mean.p = 0.6, area = FALSE, beta1.theta = 0, beta2.theta = 0, beta3.theta = 0, beta2.lam = 0, beta3.lam = 0, beta4.lam = 0, beta3.p = 0, beta5.p = 0, beta6.p = 0, beta.p.survey = 0, beta.p.N = 0, sigma.lam = 0, dispersion = 10, sigma.p.site = 0, sigma.p.visit = 0, sigma.p.survey = 0, sigma.p.ind = 0, Neg.Bin = FALSE, open.N = FALSE, show.plots = TRUE, verbose = TRUE) { # # This very general function generates single-season count data # under variants of the binomial N-mixture model of Royle (2004) and of the # multinomial N-mixture model of Royle et al (2007). # # Data are simulated at the level of each individual and individual-specific # detection heterogeneity can be included. As a side-effect, individual- # specific detection histories are generated and hence, data are also # be simulated under the corresponding multinomial N-mixture model. # # Broadly, the function can generate data under this most general model: # # 'Suitability' (zero-inflation) ~ cov1 + cov2 + cov3 # # Abundance ~ offset + cov2 + cov3 + cov4 + overdispersion # # Detection ~ cov3 + cov5 + cov6 + survey.covariate + # log(N+1) + eps.site + eps.visit + eps.survey + eps.individual # # Overdispersion in abundance is modelled either as a Poisson-log-normal with # a normal random site effect in lambda or with a Negative binomial with # mean lambda and a 'size', or dispersion, parameter. # Variable site areas can be specified to affect abundance as in an offset. # Abundance can be zero-inflated (this is the 'suitability' model). Note that # the zero-inflation parameter is called theta here (in unmarked it is called # psi). mean.phi is the probability that a site is suitable (i.e., 1 minus # the expected proportion of sites with structural zero abundance. # Site covariate 2 can affect both suitability and abundance, while covariate 3 # may affect all three levels. Hence, the function permits to simulate the # case where a single site covariate affects different levels in the process # (e.g., abundance and detection) in opposing directions (as for instance # in Kery, Auk, 2008) # Density-dependent detection can be modelled as a logistic-linear effect # of local abundance (centered and log(x+1) transformed) # Overdispersion in detection is modelled via normal random effects (the eps # terms above) specific to sites, visits, surveys or individuals. # Effects of covariates and random-effects factors are modelled # as additive on the link scale (log for abundance and # logit for suitability and detection). # # Data may be generated under one specific open-population model when # argument 'open.N' is set to TRUE. 
# # Written by Marc Kery, 2014-2015 # # Function arguments # nsites: number of sites # nvisits: number of visits per site # mean.theta: proportion of sites that can have non-zero abundance in principle: # suitability model for zero-inflation # mean.lam: Expected abundance at the average value of all # abundance covariates (and ignoring random site effects): abundance model # mean.p: Expected detection at the average value of all # detection covariates (and ignoring all random effects): detection model # area: determines area of sites (A), defaults to A=1 (i.e., all identical), # but you can supply a vector of site areas of length nsites instead. # beta1.theta: coefficient of site covariate 1 in suitability model # beta2.theta: coefficient of site covariate 2 in suitability model # beta3.theta: coefficient of site covariate 3 in suitability model # beta2.lam: coefficient of site covariate 2 in abundance model # beta3.lam: coefficient of site covariate 3 in abundance model # beta4.lam: coefficient of site covariate 4 in abundance model # beta3.p: coefficient of site covariate 3 in detection model # beta5.p: coefficient of site covariate 5 in detection model # beta6.p: coefficient of site covariate 6 in detection model # beta.p.survey: coefficient of survey ('observational') covariate on p # beta.p.N: coefficient of centered local population size (log(N+1)) in # detection model (i.e., coef. for density-dependent detection prob.) # sigma.lam: "Overdispersion SD" in linear predictor of abundance # dispersion: 'size' or extra-Poisson dispersion of Negative binomial # sigma.p.site: "Overdispersion SD" in linear predictor of # detection coming from random site effects # sigma.p.visit: "Overdispersion SD" in linear predictor of # detection coming from random visit effects # sigma.p.survey: "Overdispersion SD" in linear predictor of # detection coming from random site-by-survey effects # sigma.p.ind: "Overdispersion SD" in linear predictor of # detection coming from random site-by-individual effects # Neg.Bin: if FALSE, any overdispersion in abundance is modelled by # a Poisson log-normal; if TRUE, abundance overdispersion is modelled # by adoption of a Negative binomial distribution for latent N # Open.N: if TRUE, data are simulated under one specific form of an open # population, where N in the first occasion is drawn from the specified # mixture distribution and for all further occasions j, we have # N_ij ~ Poisson(N_i(j-1)). With open.N = TRUE, we must have # sigma.p.ind = 0, show.plots = FALSE and nvisits >1. # show.plots: if TRUE, plots of the data will be displayed; set to FALSE # if you are running simulations. if(FALSE) x <- NULL # Fix issues with 'curve' logit <- plogis # allows 'logit' to appear in axis label instead of 'plogis' # Checks and fixes for input data ----------------------------- nsites <- round(nsites[1]) nvisits <- round(nvisits[1]) stopifnotProbability(mean.theta) stopifNegative(mean.lam, allowZero=FALSE) stopifnotProbability(mean.p) stopifNegative(sigma.lam) stopifNegative(dispersion, allowZero=FALSE) stopifNegative(sigma.p.site) stopifNegative(sigma.p.visit) stopifNegative(sigma.p.survey) stopifNegative(sigma.p.ind) # -------------------------------------------- # Create indices nreps <- rep(nvisits, nsites) # No. visits (reps) per site site <- 1:nsites # Site index at site level site.per.unit <- rep(1:nsites, each = nvisits) # Site index at rep level if(verbose) { cat("***** New simulation *****\n\n") cat("No. sites visited: ", nsites, "\n") cat("No. rep. 
visits: ", nvisits, "\n") cat("Total no. visits: ", sum(nreps), "\n\n") } # Generate covariates with standardised values between -2 and 2 # Site covariates 1-6 site.cov <- matrix(runif(n = nsites*6, -2, 2), ncol = 6) colnames(site.cov) <- c("cov1", "cov2", "cov3", "cov4", "cov5", "cov6") # Survey covariate survey.cov <- matrix(runif(n = nsites*nvisits, -2, 2), ncol = nvisits) # get site-specific values for offset if(area[1] == FALSE) A <- rep(1, nsites) # means no offset if(area[1] != FALSE) A <- area # use supplied vector as area for offset # Simulate ecological process: # (1) 'suitability' (leading to zero-inflation) # Zero-inflation: create "suitability" indicator z # Linear predictor of suitability model alpha.theta <- ifelse(mean.theta == 1, 25, qlogis(mean.theta)) # Avoid Inf. theta <- plogis(alpha.theta + beta1.theta * site.cov[,1] + beta2.theta * site.cov[,2] + beta3.theta * site.cov[,3]) s <- rbinom(n = nsites, 1, theta) # Suitability indicator # (2) Abundance process (for sites suitable in principle) # Linear predictor of abundance model excluding random effects # this is directly the lin.pred. for the Neg.Bin abundance model log.lam.partial <- log(A) + log(mean.lam) + beta2.lam * site.cov[,2] + beta3.lam * site.cov[,3] + beta4.lam * site.cov[,4] # Draw abundance under the (zero-inflated) Poisson distribution # (For baseline comparison of the abundance distributions in histo below) N.P <- rpois(n = nsites, lambda = s * exp(log.lam.partial)) # Draw abundance under the (zero-inflated) negative binomial distribution N.NB <- rnbinom(n = nsites, mu = s * exp(log.lam.partial), size = dispersion) # Draw abundance under the (zero-inflated) Poisson log-normal distribution # Random site effects in lambda, zero out if Neg.Bin == TRUE eta.lam <- rnorm(n = nsites, sd = sigma.lam * (1 - Neg.Bin)) # Linear predictor of PLN abundance model including random effects log.lam <- log.lam.partial + eta.lam # Draw realised values of abundance at each site N.PLN <- rpois(n = nsites, lambda = s * exp(log.lam)) if(Neg.Bin == TRUE){ # Negative-binomial N's fed into variable N .... N <- N.NB } else { # ... or else those from PLN mixture N <- N.PLN } Ntotal <- sum(N) # Add up N over all M sites # Ecological process when population open (open.N == TRUE) N.open <- matrix(NA, nrow = nsites, ncol = nvisits) if(open.N){ N.open[,1] <- N for(j in 2:nvisits){ N.open[,j] <- rpois(nsites, N.open[,j-1]) } #cor(N.open) #matplot(1:nvisits, t(N.open), type = 'l') } # Visualization of suitability and abundance code now moved to line 281 # Simulate observation process conditional on true state N # Create structures to be filled nslice <- max(N)+1 # Max. number of slices in DH 3D array inds <- DH <- p <- logit.p.partial <- logit.p <- array(NA, dim = c(nsites, nvisits, nslice)) C <- eta.p.survey <- array(NA, dim = c(nsites, nvisits)) # Determine occupied sites and table of 'existing' individuals (inds) occ.sites <- which(N>0) for(i in occ.sites){ inds[i,,1:N[i]] <- 1 } # Draw random site effects in p eta.p.site <- rnorm(n=nsites, mean = 0, sd = sigma.p.site) # Draw random visit effects in p eta.p.visit <- rnorm(n=nvisits, mean = 0, sd = sigma.p.visit) # Draw random survey (= site-by-survey) effects in p eta.p.survey <- matrix(rnorm(n = nsites*nvisits, sd = sigma.p.survey), nrow = nsites, ncol = nvisits) # Draw random individual (= site-by-ind) effects in p (NOT site-ind-visit !) 
eta.p.ind <- array(rnorm(n = nsites* max(N), mean = 0, sd = sigma.p.ind), dim = c(nsites, nslice)) #eta.p.ind[N == 0,] <- NA # NA out effects for non-existing inds. for(i in 1:nsites){ eta.p.ind[i,(N[i]+1):nslice] <- NA } # For default closed population (open.N == FALSE) if(open.N == FALSE){ # Sample individuals to get individual detection histories (DH) for each site (note that DH[i,,] == 'NA' when N[i] = 0) for(i in occ.sites){ # Loop over occupied sites (with N>0) for(j in 1:nvisits){ # Loop over visits for(n in 1:nslice){ # Loop over individuals # Linear predictor of detection model excl. random effects logit.p.partial[i,j,n] <- qlogis(mean.p) + beta3.p * site.cov[i,3] + beta5.p * site.cov[i,5] + beta6.p * site.cov[i,6] + beta.p.survey * survey.cov[i,j] + beta.p.N * (log(N[i]+1)-mean(log(N[i]+1))) # Linear predictor of detection model including random effects logit.p[i,j,n] <- logit.p.partial[i,j,n] + eta.p.site[i] + eta.p.visit[j] + eta.p.survey[i,j] + eta.p.ind[i,n] # Apply inverse link function p[i,j,n] <- plogis(logit.p[i,j,n]) # Get individual detection histories: NA out non-existing inds. # (i.e., at sites where N=0) # prob <- inds[i,j,n] * p[i,j,n] # NA out non-existing individuals # DH[i,j,n] <- rbinom(n=1, size = 1, prob = prob) # a warning every time prob is NA if(!is.na(inds[i,j,n])) DH[i,j,n] <- rbinom(n=1, size = 1, prob = p[i, j, n]) ## MM 2017-03-10 # else ... the value stays as NA } } } # DH <- DH[,,-nslice] # Get rid of unused last slice # p <- p[,,-nslice] # Get rid of unused last slice DH <- DH[,,-nslice, drop=FALSE] # Get rid of unused last slice ## MM 2017-03-10 p <- p[,,-nslice, drop=FALSE] # Get rid of unused last slice # Get counts C by tallying up detection histories (DH) # Also get the sum over sites of max counts # Account for possible single-visit data if(length(dim(DH)) == 3){ # for typical multi-visit design C <- apply(DH, c(1,2), sum, na.rm = TRUE) summax <- sum(apply(C, 1, max)) } if(length(dim(DH)) == 2){ # to account single-visit design C <- apply(DH, 1, sum, na.rm = TRUE) summax <- sum(C) } pp <- N.open <- NA # Not available for open.N == FALSE } # end open.N == FALSE # For open population (open.N == TRUE) if(open.N == TRUE){ pp <- matrix(NA, nrow = nsites, ncol = nvisits) # Define detection prob for(j in 1:nvisits){ # Loop over visits # Full linear predictor of detection model pp[,j] <- plogis(qlogis(mean.p) + beta3.p * site.cov[i,3] + beta5.p * site.cov[i,5] + beta6.p * site.cov[i,6] + beta.p.survey * survey.cov[i,j] + beta.p.N * (log(N[i]+1)-mean(log(N[i]+1))) + eta.p.site + eta.p.visit[j] + eta.p.survey[,j]) C[,j] <- rbinom(nsites, N.open[,j], pp[,j]) } summax <- sum(apply(C, 1, max)) # Fill some things used in the function output p <- pp ; DH <- NA } if(show.plots){ # Restore graphical settings on exit --------------------------- oldpar <- par("mfrow", "cex", "cex.main") oldAsk <- devAskNewPage(ask = dev.interactive(orNone=TRUE)) on.exit({par(oldpar); devAskNewPage(oldAsk)}) # -------------------------------------------------------------- tryPlot <- try( { # Visualization of suitability and abundance # '''''''''''''''''''''''''''''''''''''''''' # Page 1: Plots features of the suitability part of the system par(mfrow = c(2, 2), cex.main = 1) barplot(table(s), main = "Number unsuitable and suitable sites", col = "grey") plot(site.cov[,1], s, ylim = c(0,1), main = "'Suitability' & site covariate 1") curve(logit(alpha.theta + beta1.theta * x), -2, 2, col = "red", add = TRUE, lwd = 3) plot(site.cov[,2], s, ylim = c(0,1), main = "'Suitability' & site 
covariate 2") curve(logit(alpha.theta + beta2.theta * x), -2, 2, col = "red", add = TRUE, lwd = 3) plot(site.cov[,3], s, ylim = c(0,1), main = "'Suitability' & site covariate 3") curve(logit(alpha.theta + beta3.theta * x), -2, 2, col = "red", add = TRUE, lwd = 3) # Page 2: Plots features of the abundance part of the system par(mfrow = c(3, 3), cex.main = 1) ylim = c(min(exp(log.lam.partial))-1, max(N)) curve(exp(log(mean.lam) + beta2.lam * x), -2, 2, xlab = "Site covariate 2", main = "Site covariate 2 & lambda", ylab = "partial lambda", col = "red", lwd = 3) curve(exp(log(mean.lam) + beta3.lam * x), -2, 2, xlab = "Site covariate 3", main = "Site covariate 3 & lambda", ylab = "partial lambda", col = "red", lwd = 3) curve(exp(log(mean.lam) + beta4.lam * x), -2, 2, xlab = "Site covariate 4", main = "Site covariate 4 & lambda", ylab = "partial lambda", col = "red", lwd = 3) plot(site.cov[,2], exp(log.lam.partial), col = "red", xlab = "Site covariate 2", ylab = "lambda", main = "Marginal lambda \n(excl. site random effects)", ylim = ylim) plot(site.cov[,3], exp(log.lam.partial), col = "red", xlab = "Site covariate 3", ylab = "lambda", main = "Marginal lambda \n(excl. site random effects)", ylim = ylim) plot(site.cov[,4], exp(log.lam.partial), col = "red", xlab = "Site covariate 4", ylab = "lambda", main = "Marginal lambda \n(excl. site random effects)", ylim = ylim) plot(site.cov[,2], exp(log.lam), col = "red", xlab = "Site covariate 2", ylab = "lambda", main = "Marginal lambda \n(incl. site random effects)", ylim = ylim) plot(site.cov[,3], exp(log.lam), col = "red", xlab = "Site covariate 3", ylab = "lambda", main = "Marginal lambda \n(incl. site random effects)", ylim = ylim) plot(site.cov[,4], exp(log.lam), col = "red", xlab = "Site covariate 4", ylab = "lambda", main = "Marginal lambda \n(incl. site random effects)", ylim = ylim) # Page 3: Realized adundances par(mfrow = c(1, 3), cex = 1) plot(site.cov[,2], N, col = "red", xlab = "Site covariate 2", ylab = "N", main = "Realized abundance (N)", ylim = ylim) plot(site.cov[,3], N, col = "red", xlab = "Site covariate 3", ylab = "N", main = "Realized abundance (N)", ylim = ylim) plot(site.cov[,4], N, col = "red", xlab = "Site covariate 4", ylab = "N", main = "Realized abundance (N)", ylim = ylim) # Page 4: Random site effects if !Neg.Bin, histogram for N for both if(Neg.Bin == TRUE){ xlim <- c(min(c(N.P, N.NB)), max(c(N.P, N.NB))) par(mfrow = c(1, 1), cex.main = 1) histCount(N.P, N.NB, xlab = "Abundance N", main = paste("N under (zero-infl.) Neg.bin (red)", "and (zero-infl.) Poisson (blue) mixtures", sep="\n")) } else { xlim <- c(min(c(N.P, N.PLN)), max(c(N.P, N.PLN))) par(mfrow = c(1, 2), cex.main = 1) hist(eta.lam, col = "grey", main = "Random site effects in abundance") histCount(N.P, N.PLN, xlab = "Abundance N", main = paste(c("N under (zero-infl.) Poisson log-normal (red)", "compared with baseline (zero-infl.) 
Poisson mixture (blue)"), sep="\n")) } # Plots and summaries of observation process # '''''''''''''''''''''''''''''''''''''''''' # Page 5: Effects on p par(mfrow = c(3,2), cex.main = 1) curve(logit(qlogis(mean.p) + beta3.p * x), -2, 2, xlab = "Site covariate 3", main = "Site covariate 3 & detection", ylab = "p", col = "red", lwd = 3) curve(logit(qlogis(mean.p) + beta5.p * x), -2, 2, xlab = "Site covariate 5", main = "Site covariate 5 & detection", ylab = "p", col = "red", lwd = 3) curve(logit(qlogis(mean.p) + beta6.p * x), -2, 2, xlab = "Site covariate 6", main = "Site covariate 6 & detection", ylab = "p", col = "red", lwd = 3) curve(logit(qlogis(mean.p) + beta.p.survey * x), -2, 2, xlab = "Survey covariate", main = "Survey covariate & detection", ylab = "p", col = "red", lwd = 3) curve(logit(qlogis(mean.p) + beta.p.N * x), log(0+1), log(max(N)+1), xlab = "Effect of log(N+1) in logit(p)", ylab = "p", col = "red", lwd = 3) # Page 6: Random effects in p par(mfrow = c(2,2), cex.main = 1) hist(eta.p.site, col = "grey", main = "Random site eff. in p", breaks = 50) hist(eta.p.visit, col = "grey", main = "Random visit eff. in p", breaks = 50) hist(eta.p.survey, col = "grey", main = "Random site-survey eff. in p", breaks = 50) hist(eta.p.ind, col = "grey", main = "Random ind. eff. in p", breaks = 50) # Page 7: partial p and p (site covars) par(mfrow = c(3,2), cex.main = 1) matplot(site.cov[,3], apply(plogis(logit.p.partial), c(1,2), mean, na.rm = TRUE), col = "red", xlab = "Site covariate 3", ylab = "Partial p", main = "Partial expected detection \n(no random effects)", ylim = c(0,1), pch = 1) matplot(site.cov[,3], apply(p, c(1,2), mean, na.rm = TRUE), col = "red", xlab = "Site covariate 3", ylab = "p", main = "Detection probability (with random effects)", ylim = c(0,1), pch = 1) matplot(site.cov[,5], apply(plogis(logit.p.partial), c(1,2), mean, na.rm = TRUE), col = "red", xlab = "Site covariate 5", ylab = "Partial p", main = "Partial expected detection \n(no random effects)", ylim = c(0,1), pch = 1) matplot(site.cov[,5], apply(p, c(1,2), mean, na.rm = TRUE), col = "red", xlab = "Site covariate 5", ylab = "p", main = "Detection probability (with random effects)", ylim = c(0,1), pch = 1) matplot(site.cov[,6], apply(plogis(logit.p.partial), c(1,2), mean, na.rm = TRUE), col = "red", xlab = "Site covariate 6", ylab = "Partial p", main = "Partial expected detection \n(no random effects)", ylim = c(0,1), pch = 1) matplot(site.cov[,6], apply(p, c(1,2), mean, na.rm = TRUE), col = "red", xlab = "Site covariate 6", ylab = "p", main = "Detection probability (with random effects)", ylim = c(0,1), pch = 1) # Page 8: partial p and p (survey covars), p and realised p par(mfrow = c(2,2), cex.main = 1) matplot(survey.cov, apply(plogis(logit.p.partial), c(1,2), mean, na.rm = TRUE), col = "red", xlab = "Survey covariate", ylab = "Partial p", main = "Partial expected detection \n(no random effects)", ylim = c(0,1), pch = 1) matplot(survey.cov, apply(p, c(1,2), mean, na.rm = TRUE), col = "red", xlab = "Survey covariate", ylab = "p", main = "Detection probability (with random effects)", ylim = c(0,1), pch = 1) matplot(N, apply(p, c(1,2), mean, na.rm = TRUE), col = "red", xlab = "Local abundance (N)", ylab = "p", main = "Detection probability", ylim = c(0,1), pch = 1) hist(p, col = "grey", main = "Realized detection probability \n(blue=mean)", breaks = 50) abline(v = mean(p, na.rm = TRUE), col = "blue", lwd = 2) # Page 9: Observed counts par(mfrow = c(3,3), cex.main = 1) # hist(C, col = "grey", main = "Observed counts", 
breaks = 50) histCount(C, NULL, color = "grey", main = "Observed counts", xlab = "C") matplot(site.cov[,1], C, xlab = "Site covariate 1", ylab = "Counts", main = "Obs. counts vs. site covariate 1") matplot(site.cov[,2], C, xlab = "Site covariate 2", ylab = "Counts", main = "Obs. counts vs. site covariate 2") matplot(site.cov[,3], C, xlab = "Site covariate 3", ylab = "Counts", main = "Obs. counts vs. site covariate 3") matplot(site.cov[,4], C, xlab = "Site covariate 4", ylab = "Counts", main = "Obs. counts vs. site covariate 4") matplot(site.cov[,5], C, xlab = "Site covariate 5", ylab = "Counts", main = "Obs. counts vs. site covariate 5") matplot(site.cov[,6], C, xlab = "Site covariate 6", ylab = "Counts", main = "Obs. counts vs. site covariate 6") matplot(survey.cov, C, xlab = "Survey covariate", ylab = "Counts", main = "Obs. counts vs. survey covariate") plot(rep(N, nvisits), C, xlab = "True state (abundance N)", ylab = "Obs.state (counts C)", main = "Obs. counts vs. true abundance", xlim = c(min(N,C), max(N,C)), ylim = c(min(N,C), max(N,C))) abline(0,1) }, silent = TRUE ) if(inherits(tryPlot, "try-error")) tryPlotError(tryPlot) } # Compute naive 'overdispersion coefficients' at level latent N and observed C odcN <- round(var(N)/mean(N),2) # Overdispersion coefficient if(open.N){ odcN <- round(var(c(N.open))/mean(N.open),2) # Overdispersion coefficient } odcC <- round(var(c(C))/mean(C),2) # Overdispersion coefficient if(verbose) { cat("\nNaive overdispersion measure (var/mean) for true abundance (N):", odcN,"\n") cat("Naive overdispersion measure (var/mean) for observed counts (C):", odcC,"\n") } # Output # *** Key output elements are *** # DH: detection history for each of N individuals detected at the nsites sites # C: summary of DH: number of individuals detected for each site and visit # return(list(nsites = nsites, nvisits = nvisits, nobs = sum(nreps), Neg.Bin = Neg.Bin, open.N = open.N, area = area, mean.theta = mean.theta, mean.lam = mean.lam, mean.p = mean.p, beta1.theta = beta1.theta, beta2.theta = beta2.theta, beta3.theta = beta3.theta, beta2.lam = beta2.lam, beta3.lam = beta3.lam, beta4.lam = beta4.lam, beta3.p = beta3.p, beta5.p = beta5.p, beta6.p = beta6.p, beta.p.survey = beta.p.survey, beta.p.N = beta.p.N, sigma.lam = sigma.lam, dispersion = dispersion, sigma.p.site = sigma.p.site, sigma.p.visit = sigma.p.visit, sigma.p.survey = sigma.p.survey, sigma.p.ind = sigma.p.ind, site.cov = site.cov, survey.cov = survey.cov, log.lam = log.lam, s = s, N = N, p = p, DH = DH, N.open = N.open, C = C, eta.lam = eta.lam, eta.p.site = eta.p.site, eta.p.visit = eta.p.visit, eta.p.survey = eta.p.survey, eta.p.ind = eta.p.ind, odcN = odcN, odcC = odcC, Ntotal = Ntotal, summax = summax)) }
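# A hedged usage sketch (not from the book; values are illustrative):
# negative binomial abundance plus a survey covariate on detection.
# set.seed(1)
# dat <- simNmix(nsites = 100, nvisits = 3, mean.lam = 2, mean.p = 0.6,
#                beta.p.survey = 1, Neg.Bin = TRUE,
#                show.plots = FALSE, verbose = FALSE)
# c(dat$Ntotal, dat$summax)  # true total N vs. sum of site-wise max counts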
# ------------ End of file AHMbook/R/simNmix_AHM1_6-5_Simulate_binom-multinom_mixtures.R ------------
# AHM2 section 1.2.2 Simulate data for a single population observed over T years
# Generates counts from a single population observed over T years, which can
# be observed with or without imperfect detection. The goal of this function
# is to focus on what happens with relative-abundance inference when temporal
# patterns in abundance are confounded with temporal patterns in detection
# probability. Hence, we can simulate a stable population or one with a linear
# increase or decrease with specified start and end points, around which there
# is Poisson noise. The observed counts are Binomial outcomes with a detection
# probability which can similarly be chosen to be constant or to change
# linearly over time.

# Define the simulation function
simNpC <- function(
    T = 20,             # length of time series
    expN = c(100, 75),  # expected abundance at start and end of period, linear trend
    dp = c(0.5, 0.5),   # detection probability at start and end of period, linear trend
    show.plot = TRUE)   # whether to show plots or not
{
  # Function simulates a single time-series of counts of length T
  # years from a population with a linear trend in expected abundance
  # (expN) leading from expN[1] to expN[2] and with a linear trend in
  # detection probability (dp) leading from dp[1] to dp[2].
  # Default is for a strongly declining population with constant p = 0.5.

  # ---- Checks and fixes for input data -------------------
  T <- round(T[1])
  stopifnotLength(expN, 2)
  stopifNegative(expN)
  stopifnotLength(dp, 2)
  stopifnotProbability(dp)
  # -----------------------------------------------------

  # Pick values of expected abundance (lambda) and
  # detection probability (p) for each year
  lambda <- seq(expN[1], expN[2], length.out = T)
  p <- seq(dp[1], dp[2], length.out = T)

  # Draw realized abundance (N) and the observed counts (C)
  N <- rpois(T, lambda)
  C <- rbinom(T, N, p)

  # Plots
  if(show.plot) {
    oldpar <- par(mfrow = c(1, 3), mar = c(5,5,1,1), cex.axis = 1.2, cex.lab = 1.2, cex = 1.2)
    on.exit(par(oldpar))
    tryPlot <- try( {
      plot(1:T, lambda, xlab = 'Year', ylab = 'Expected abundance (lambda)',
          ylim = c(0, max(expN)), type = 'l', lwd = 3, col = 2, frame = FALSE)
      plot(1:T, p, xlab = 'Year', ylab = 'Detection prob. (p)',
          ylim = c(0, 1), type = 'l', lwd = 3, col = 4, frame = FALSE)
      plot(1:T, N, xlab = 'Year', ylab = 'Counts, Abundance',
          ylim = c(0, max(N)), pch = 16, frame = FALSE)
      points(1:T, C, pch = 1)
      lines(1:T, lambda, col = 2, lwd = 2)
      lines(1:T, lambda*p, col = 1, lwd = 2, lty = 2)
      legend(1, 0.24*max(N), c('True N', 'Observed C'),
          pch = c(16,1), cex = 0.8, bty = 'n')
      legend(1, 0.14*max(N), c('Expected N (lambda)', 'Exp. relative abundance\n (lambda * p)'),
          lty = c(1,2), lwd = 3, col = c(2,1), cex = 0.8, bty = 'n')
    }, silent = TRUE)
    if(inherits(tryPlot, "try-error"))
      tryPlotError(tryPlot)
  }

  # Output
  return(list(
    # ---------- arguments input --------------------------
    T = T, expN = expN, dp = dp,
    # ------------ generated values -----------------------
    lambda = lambda,  # expected abundance for each year
    p = p,            # detection probability for each year
    N = N,            # realised abundance
    C = C))           # observed counts
}
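# A hedged usage sketch (not from the book; values are illustrative):
# a declining population with increasing detection, so that the observed
# counts mask part of the true decline.
# set.seed(1)
# dat <- simNpC(T = 20, expN = c(100, 50), dp = c(0.4, 0.8), show.plot = FALSE)
# cbind(N = dat$N, C = dat$C)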
# ------------ End of file AHMbook/R/simNpC_AHM2_1-2-2.R ------------
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM) # Marc Kery & Andy Royle, Academic Press, 2016. # SimOccCat = adaptation of simOcc (AHM1 section 10.5 p577) to allow for categorical covariates # Function to simulate data for static occupancy models under wide range of conditions simOccCat <- function(M = 267, J = 3, mean.occupancy = 0.6, beta1 = 0, beta2 = 0, beta3 = 0, mean.detection = 0.3, time.effects = c(0, 0), alpha1 = 0, alpha2 = 0, alpha3 = 0, sd.lp = 0, b = 0, nHab = 5, range.HAB = 2, nObs = 10, range.OBS = 4, # new arguments show.plots = TRUE){ # # The new arguments are: # nHab: the number of categories for the site covariate # range.HAB: controls the size of the effects for the categories of the site covariate # nObs: the number of categories for the detection covariate # range.OBS: controls the size of the effects for the categories of the detection covariate if(FALSE) x <- NULL # Fudge to stop R CMD check complaining about 'curve' # Checks and fixes for input data ----------------------------- M <- round(M[1]) J <- round(J[1]) stopifnotProbability(mean.occupancy) stopifnotProbability(mean.detection) stopifNegative(sd.lp) # MORE TODO # -------------------------------------------- # Create 2 continuous site covariates (elev, forest) and 1 obs. covar. (wind) elev <- runif(n = M, -1, 1) # Scaled elevation forest <- runif(n = M, -1, 1) # Scaled forest cover wind <- array(runif(n = M*J, -1, 1), dim = c(M, J)) # Scaled wind speed # Create categorical covariates with approximately equal sized categories HAB <- sample(nHab, M, replace = TRUE) OBSvec <- sample(nObs, M*J, replace = TRUE) # No constraint that observers only visit sites once OBS <- matrix(OBSvec, M, J) # Create coefficients for HAB factor that sum to 0, calculate HAB effect coefHAB <- runif(nHab, 0, range.HAB) coefHAB <- coefHAB - mean(coefHAB) # Now sums to zero HABeffect <- coefHAB[HAB] # Create coefficients for OBS factor that sum to 0, calculate OBS effect coefOBS <- runif(nObs, 0, range.OBS) coefOBS <- coefOBS - mean(coefOBS) # Now sums to zero OBSeffect <- matrix(coefOBS[OBSvec], nrow=M, ncol=J) # Model for occurrence (presence/absence): simulate system state z beta0 <- qlogis(mean.occupancy) # Mean occurrence on link scale psi <- plogis(beta0 + beta1*elev + beta2*forest + beta3*elev*forest + HABeffect) z <- rbinom(n = M, size = 1, prob = psi) # Realised occurrence (true state) # Model for observations: simulate observations y, given system state z alpha0 <- qlogis(mean.detection) # mean detection on link scale gamma <- runif(J, min(time.effects), max(time.effects)) # (fixed) time effects eps <- rnorm(M, 0, sd.lp) # Site (random) effects # Generate detection probability array without behavioural effect # for(j in 1:J){ # logit.p0[,j] <- alpha0 + gamma[j] + alpha1*elev + alpha2*wind[,j] + alpha3*elev*wind[,j] + eps + OBSeffect[,j] # } tmp <- alpha0 + alpha1*elev + alpha2*wind + alpha3*elev*wind + eps + OBSeffect logit.p0 <- sweep(tmp, 2, gamma, "+") # Generate detection/nondetection data: the measurements of presence/absence y <- p <- matrix(NA, M, J) # For the first capture occasion (no behavioural response possible) p[,1] <- plogis(logit.p0[,1]) # 'p' is needed for the output y[,1] <- rbinom(n = M, size = z, prob = p[,1]) # y[,1] <- rbinom(n = M, size = 1, prob = z * p0[,1]) # SAME # Later capture occasions (potentially with contribution of b) for (j in 2:J){ p[, j] <- plogis(logit.p0[,j] + b*y[, j-1]) y[, j] <- rbinom(n = M, size = z, prob = p[, j]) } # True and observed measures of 
'distribution' sumZ <- sum(z) # Total occurrence (all sites) sumZ.obs <- sum(apply(y,1,max)) # Observed number of occ sites psi.fs.true <- sum(z) / M # True proportion of occ. sites in sample psi.fs.obs <- mean(apply(y,1,max)) # Observed proportion of occ. sites in sample if(show.plots){ # Restore graphical settings on exit ------------------------- oldpar <- par("mfrow", "cex.main", "cex.lab", "mar") oldAsk <- devAskNewPage(ask = dev.interactive(orNone=TRUE)) on.exit({par(oldpar); devAskNewPage(oldAsk)}) # ------------------------------------------------------------ tryPlot <- try( { # Plots for system state # ---------------------- par(mfrow = c(2, 3)) # Expected values curve(plogis(beta0 + beta1*x), -1, 1, col = "red", frame.plot = FALSE, ylim = c(0, 1), xlab = "Elevation", ylab = "Expected occupancy probability", lwd = 2, main="Variation of occupancy probability\nwith elevation") abline(h=mean.occupancy, col="blue") curve(plogis(beta0 + beta2*x), -1, 1, col = "red", frame.plot = FALSE, ylim = c(0, 1), xlab = "Forest cover", ylab = "", lwd = 2, main="Variation of occupancy probability\nwith forest cover") abline(h=mean.occupancy, col="blue") plot(x=1:nHab, y=plogis(beta0 + coefHAB), ylim=c(0,1), pch=15, cex=2, col = "red", xlab="Habitat type", ylab="", frame=FALSE, main="Variation of occupancy probability\nbetween habitat types") abline(h=mean.occupancy, col="blue") legend('topleft', bty='n', lty=1, col='blue', legend='mean occupancy') # Simulated values plot(elev, psi, frame.plot = FALSE, ylim = c(0, 1), xlab = "Elevation", ylab = "Simulated occupancy probability") plot(forest, psi, frame.plot = FALSE, ylim = c(0, 1), xlab = "Forest cover", ylab = "") plot(jitter(HAB), psi, frame.plot = FALSE, ylim = c(0, 1), xlab = "Habitat type", ylab = "") abline(v=(1:(nHab-1))+0.5, col='gray') # Plots for observation process # ----------------------------- par(mfrow = c(3, 3)) #, cex.main = 1.2, cex.lab = 1.5, mar = c(5,5,3,2)) # Plots for elevation and time curve(plogis(alpha0 + alpha1*x), -1, 1, col = "red", frame.plot = FALSE, ylim = c(0, 1), xlab = "Elevation", ylab = "Expected detection (p)", lwd = 2, main = "Effects of elev and time") for(j in 1:J){ curve(plogis(alpha0 + gamma[j] + alpha1*x),-1,1,lwd = 1, col="grey", add=TRUE) } abline(h=mean.detection, col="blue") # Plots for wind speed and time curve(plogis(alpha0 + alpha2*x), -1, 1, col = "red", frame.plot = FALSE, ylim = c(0, 1), xlab = "Wind speed", ylab = "", lwd = 2, main = "Effects of wind and time") for(j in 1:J){ curve(plogis(alpha0 + gamma[j] + alpha2*x),-1,1,lwd = 1, col="grey", add=TRUE) } abline(h=mean.detection, col="blue") # Plots for observer and time plot(x=1:nObs, y=plogis(alpha0 + coefOBS), ylim=c(0,1), pch=15, col="red", xlab="Observer", ylab="", frame=FALSE, main="Effects of observer and time") for(j in 1:J){ points(x=1:nObs, y=plogis(alpha0 + coefOBS + gamma[j]), pch=15, col="grey") } points(x=1:nObs, y=plogis(alpha0 + coefOBS), pch=15, col="red") abline(h=mean.detection, col="blue") legend('topleft', bty='n', lty=1, col='blue', legend='mean detection') # Plots for elevation and 'heterogeneity' curve(plogis(alpha0 + alpha1*x), -1, 1, col = "red", frame.plot = FALSE, ylim = c(0, 1), xlab = "Elevation", ylab = "Expected detection (p)", lwd = 2, main = "Elevation and site heterogeneity") for(i in 1:M){ curve(plogis(alpha0 + eps[i] + alpha1*x),-1,1,lwd = 1, col="grey", add=T) } curve(plogis(alpha0 + alpha1*x), -1, 1, col = "red", lwd = 2, add = TRUE) abline(h=mean.detection, col="blue") # Plots for wind speed and 
'heterogeneity' curve(plogis(alpha0 + alpha2*x), -1, 1, col = "red", frame.plot = FALSE, ylim = c(0, 1), xlab = "Wind speed", ylab = "", lwd = 2, main = "Wind and site heterogeneity") for(i in 1:M){ curve(plogis(alpha0 + eps[i] + alpha2*x),-1,1,lwd = 1, col="grey", add=TRUE) } curve(plogis(alpha0 + alpha2*x), -1, 1, col = "red", lwd = 2, add = TRUE) abline(h=mean.detection, col="blue") # Plots for observer and heterogeneity plot(x=1:nObs, y=plogis(alpha0 + coefOBS), ylim=c(0,1), pch=15, col="red", xlab="Observer", ylab="", frame=FALSE, main="Observer and site heterogeneity") for(i in 1:M){ points(x=1:nObs, y=plogis(alpha0 + coefOBS + eps[i]), pch=15, col="grey") } points(x=1:nObs, y=plogis(alpha0 + coefOBS), pch=15, col="red") abline(h=mean.detection, col="blue") # Plot for elevation and 'behavioural response' p0plot <- plogis(logit.p0) p1plot <- plogis(logit.p0 + b) caught.before <- cbind(FALSE, y[, 1:(J-1)] == 1) p0plot[caught.before] <- NA p1plot[!caught.before] <- NA matplot(elev, p0plot, xlab = "Elevation", ylab = "Simulated detections", main = "p ~ elevation\nred=detected before", pch = 1, ylim = c(0,1), col = "blue", frame.plot = FALSE) if(sum(is.finite(p1plot)) > 0) matplot(elev, p1plot, pch = 16, col = "red", add = TRUE) # Plot for wind speed and 'behavioural response' matplot(wind, p0plot, xlab = "Wind speed", ylab = "", main="p ~ wind\n", pch = 1, ylim = c(0,1), col = "blue", frame.plot = FALSE) if(sum(is.finite(p1plot)) > 0) matplot(wind, p1plot, pch = 16, col = "red", add = TRUE) # Plot for observer and 'behavioural response' plot(jitter(OBS), p0plot, xlab = "Observer", ylab = "", main = "p ~ observer\nblue=not detected before", pch = 1, ylim = c(0,1), col = "blue", frame.plot = FALSE) if(sum(is.finite(p1plot)) > 0) points(jitter(OBS), p1plot, pch=16, col="red") abline(v=(1:(nObs-1))+0.5, col='gray') }, silent = TRUE) if(inherits(tryPlot, "try-error")) tryPlotError(tryPlot) } # Output return(list( # arguments input M = M, J = J, mean.occupancy = mean.occupancy, beta0 = beta0, beta1 = beta1, beta2 = beta2, beta3 = beta3, mean.detection = mean.detection, time.effects = time.effects, alpha0 = alpha0, alpha1 = alpha1, alpha2 = alpha2, alpha3 = alpha3, sd.lp = sd.lp, b = b, nHab = nHab, range.HAB = range.HAB, nObs = nObs, range.OBS = range.OBS, # Generated values gamma = gamma, eps = eps, elev = elev, forest = forest, wind = wind, HAB = HAB, OBS = OBS, coefHAB = coefHAB, coefOBS = coefOBS, psi = psi, z = z, p = p, p0 = plogis(logit.p0), p1 = plogis(logit.p0 + b), y = y, sumZ = sumZ, sumZ.obs = sumZ.obs, psi.fs.true = psi.fs.true, psi.fs.obs = psi.fs.obs)) }
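# A hedged usage sketch (not from the book; values are illustrative):
# 4 habitat types and 8 observers, no behavioural response.
# set.seed(1)
# dat <- simOccCat(M = 200, J = 3, nHab = 4, nObs = 8, b = 0, show.plots = FALSE)
# dat$coefHAB                # habitat effects, constrained to sum to zero
# c(dat$sumZ, dat$sumZ.obs)  # true vs. observed number of occupied sites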
# ------------ End of file AHMbook/R/simOccCat.R ------------
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM) # Marc Kery & Andy Royle, Academic Press, 2016. # From Marc, 13 June 2019 # "Define function simOccSpatial.docx" # Originally simNmixSpatial. # ------ Define function simOccSpatial -------- simOccSpatial <- function(nsurveys = 3, mean.psi = 0.6, beta = c(2, -2), mean.p = 0.4, alpha = c(-1, -1), sample.size = 500, variance.RF = 1, theta.RF = 10, seeds = c(10, 100), show.plots = TRUE, verbose = TRUE){ # Simulates replicated detection/nondetection data under a spatial, static occupancy model for a semi-realistic landscape in a square of 50x50 km in the Bernese Oberland near Interlaken, Switzerland. # Unit of the data simulation is a 1km2 quadrat, hence, there are 2500 units (this cannot be varied in the function). # For occupancy, the function allows you to specify a quadratic effect of elevation, the data for which are contained in the data set BerneseOberland, which is part of the AHMbook package and is a subset of the data set 'Switzerland' in R package unmarked. # Then, a Gaussian spatial random field (s) with negative exponential correlation function is simulated using the AHMbook function simExpCorrRF. For that field, you can set the variance and the scale parameter theta (see helptext for that function for more details). Basically, the larger the value of theta.RF, the bigger are the 'islands' simulated in the random field. # The occupancy in each quadrat i is built up via the following linear predictor: # psi[i] <- qlogis(beta0 + beta1 * elev[i] + beta2 * elev[i]^2 + s[i]) # z[i] ~ Bernoulli(psi[i]) # Replicated detection/nondetection data are simulated as usual under a Bernoulli observation model, and detection probability is allowed to vary by one site and one observational covariate: respectively quadrat forest cover, which is real data in the BerneseOberland data set, and wind-speed, which is invented data. # Detection/nondetection data at each site (i) and for each occasion (j) are produced according to the following model: # p[i,j] <- plogis(alpha0 + alpha1 * forest[i] + alpha2 * wind[i,j]) # y[i,j] ~ Bernoulli(z[i] * p[i,j]) # Finally, we assume that not each one of the 2500 quadrats is surveyed. Hence, we allow you to choose the number of quadrats that are surveyed and these will then be randomly placed into the landscape. We then assume that the response variable will only be available for these surveyed quadrats, i.e., detection/nondetection data from all non-surveyed quadrats will be NA'd out. 
BerneseOberland <- NULL # otherwise "no visible binding for global variable 'BerneseOberland'" when checked data(BerneseOberland, envir = environment()) # Simulate spatial random field set.seed(seeds[1]) s <- simExpCorrRF(variance = variance.RF, theta = theta.RF, show.plots = show.plots) # Simulate Occupancy data with spatially correlated random effect in psi nsites <- 2500 # Number of sites (corresponding to the 50 by 50 grid) # nsurveys <- nsurveys # Number of replicate observations y <- array(dim = c(nsites, nsurveys)) # Array for the response # Ecological process beta0 <- qlogis(mean.psi) elev <- standardize(BerneseOberland$elevation) forest <- standardize(BerneseOberland$forest) lpsi0 <- beta0 + beta[1] * elev + beta[2] * elev^2 lpsi <- lpsi0 + c(s$field) psi0 <- plogis(lpsi0) psi <- plogis(lpsi) # Determine actual presence/absence as Bernoulli rv’s with parameter psi z <- rbinom(n = nsites, 1, psi) # Observation process # Detection probability as linear function of forest and wind speed alpha0 <- qlogis(mean.p) wind <- matrix(rnorm(nsites*nsurveys), nrow = nsites, ncol = nsurveys) p <- array(NA, dim = c(nsites, nsurveys)) for(j in 1:nsurveys){ p[,j] <- plogis(alpha0 + alpha[1] * forest + alpha[2] * wind[,j]) } # Go out and do those error-prone presence-absence surveys for (j in 1:nsurveys){ y[,j] <- rbinom(n = nsites, size = z, prob = p[,j]) } # Select a sample of sites for surveys set.seed(seeds[2]) surveyed.sites <- sort(sample(1:nsites, size = sample.size)) # Create the array of observed data by NA'ing out unsurveyed quadrats yobs <- y # Make a copy: the observed data yobs[-surveyed.sites,] <- NA mean(z) # Finite-sample occupancy nocc <- sum(z) # Minimal console output trueNocc <- sum(z) obsNocc <- sum(apply(y, 1, max)) true_psi_fs <- trueNocc / 2500 obs_psi_fs <- obsNocc / 2500 if(verbose) { cat("\n\n\nTrue number of occupied sites:", trueNocc) cat("\n\n\nObserved number of occupied sites:", obsNocc) cat("\n\n\nNumber of occupied sites where species missed:", trueNocc - obsNocc) cat("\nUnderestimation of 'species range size' in 2500 quadrats:", round(100*(1-obsNocc/trueNocc)), "%\n\n") } # Plot stuff if(show.plots){ # Restore graphical settings on exit --------------------------- oldpar <- par(mfrow = c(1,2), mar = c(5,8,5,2), cex.lab = 1.5, "cex.main") oldAsk <- devAskNewPage(ask = dev.interactive(orNone=TRUE)) on.exit({par(oldpar); devAskNewPage(oldAsk)}) # -------------------------------------------------------------- tryPlot <- try( { # Plot psi as function of covariates (excluding spatial field) plot(BerneseOberland$elevation, psi0, cex = 1, pch = 16, main = "Occupancy probability (psi) without spatial field", xlab = "Elevation", ylab = "psi excl. spatial field", frame = FALSE, col = rgb(0, 0, 0, 0.3)) # Plot psi as function of covariates (with spatial field) plot(BerneseOberland$elevation, psi, cex = 1, pch = 16, main = "Occupancy probability (psi) including effect of spatial field", xlab = "Elevation", ylab = "psi incl. 
spatial field", frame = FALSE, col = rgb(0, 0, 0, 0.3)) # Plot detection as a function of the two covariates par(mfrow = c(1,2), cex.main = 1.5) plot(wind, p, ylim = c(0,1), cex = 1, main = "Detection (p) ~ Wind speed", frame = FALSE, col = rgb(0,0,0,0.3), pch = 16) noforest <- forest < -1.34 points(wind[noforest,], p[noforest,], col = 'blue', pch = 16, cex = 1) legend('topright', 'blue: sites with no forest') plot(BerneseOberland$forest, apply(p, 1, mean), ylim = c(0,1), cex = 1, main = "Detection (p) ~ Forest cover", frame = FALSE, col = rgb(0,0,0,0.3), pch = 16) # Summary set of plots par(mfrow = c(2, 3), mar = c(2,2,4,6)) r <- raster::rasterFromXYZ(data.frame(x = BerneseOberland$x, y = BerneseOberland$y, z = BerneseOberland$elevation)) raster::plot(r, col = topo.colors(20), axes = FALSE, box = FALSE, main = "Elevation (metres)") r <- raster::rasterFromXYZ(data.frame(x = BerneseOberland$x, y = BerneseOberland$y, z = BerneseOberland$forest)) raster::plot(r, col = topo.colors(20), axes = FALSE, box = FALSE, main = "Forest cover (%)") r <- raster::rasterFromXYZ(data.frame(x = s$gr[,1], y = s$gr[,2], z = c(s$field))) raster::plot(r, col = topo.colors(20), axes = FALSE, box = FALSE, main = "Spatial effect (neg. exp. corr.)") r <- raster::rasterFromXYZ(data.frame(x = BerneseOberland$x, y = BerneseOberland$y, z = z)) raster::plot(r, col = c("white", "black"), axes = FALSE, box = FALSE, main = "Presence/absence (z)") r <- raster::rasterFromXYZ(data.frame(x = BerneseOberland$x, y = BerneseOberland$y, z = apply(p, 1, mean))) raster::plot(r, col = topo.colors(20), axes = FALSE, box = FALSE, main = "Average detection probability") r <- raster::rasterFromXYZ(data.frame(x = BerneseOberland$x, y = BerneseOberland$y, z = apply(y, 1, max))) raster::plot(r, col = c("white", "black"), axes = FALSE, box = FALSE, main = "Observed presence/absence (max(y))\n with surveyed sites") points(BerneseOberland$x[surveyed.sites], BerneseOberland$y[surveyed.sites], pch = 16, col = "red", cex = 0.8) }, silent = TRUE) if(inherits(tryPlot, "try-error")) tryPlotError(tryPlot) } # Output return(list( # ------------------- arguments input ---------------------- nsurveys = nsurveys, mean.psi = mean.psi, beta = beta, mean.p = mean.p, alpha = alpha, sample.size = sample.size, variance.RF = variance.RF, theta.RF = theta.RF, seeds = seeds, # ------------------- from BerneseOberland ---------------------- xcoord = BerneseOberland$x, ycoord = BerneseOberland$y, elevation = BerneseOberland$elevation, forest = BerneseOberland$forest, elevationS = elev, forestS = forest, # ------------------- generated variables ------------------------ wind = wind, field = s$field, alpha0 = alpha0, beta0 = beta0, psi = psi, z = z, trueNocc = trueNocc, obsNocc = obsNocc, true_psi_fs = true_psi_fs, obs_psi_fs = obs_psi_fs, p = p, y = y, surveyed.sites = surveyed.sites, yobs = yobs)) } # End of function definition
# ------------ End of file AHMbook/R/simOccSpatial_AHM2_9.R ------------
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM) # Marc Kery & Andy Royle, Academic Press, 2016. # simOcc - AHM1 section 10.5 p577 # Function to simulate data for static occupancy models under wide range of conditions # (introduced in AHM1 Section 10.5) simOcc <- function(M = 267, J = 3, mean.occupancy = 0.6, beta1 = -2, beta2 = 2, beta3 = 1, mean.detection = 0.3, time.effects = c(-1, 1), alpha1 = -1, alpha2 = -3, alpha3 = 0, sd.lp = 0.5, b = 2, show.plots = TRUE){ # # Written by Marc Kery, 21 March 2015 # # Function to simulate occupancy measurements replicated at M sites # during J occasions. # Population closure is assumed for each site. # Expected occurrence may be affected by elevation (elev), # forest cover (forest) and their interaction. # Expected detection probability may be affected by elevation, # wind speed (wind) and their interaction. # Function arguments: # M: Number of spatial replicates (sites) # J: Number of temporal replicates (occasions) # mean.occupancy: Mean occurrence at value 0 of occurrence covariates # beta1: Main effect of elevation on occurrence # beta2: Main effect of forest cover on occurrence # beta3: Interaction effect on occurrence of elevation and forest cover # mean.detection: Mean detection prob. at value 0 of detection covariates # time.effects (on logit scale): bounds for uniform distribution from # which time effects gamma will be drawn # alpha1: Main effect of elevation on detection probability # alpha2: Main effect of wind speed on detection probability # alpha3: Interaction effect on detection of elevation and wind speed # sd.lp: standard deviation of random site effects (on logit scale) # b: constant value of 'behavioural response' leading to 'trap-happiness' # (if b > 0) or 'trap shyness' (if b < 0) # show.plots: if TRUE, plots of the data will be displayed; # IMPORTANT: has to be set to FALSE if you are running simulations. if(FALSE) x <- NULL # Fudge to stop R CMD check complaining about curve # Checks and fixes for input data ----------------------------- M <- round(M[1]) J <- round(J[1]) stopifnotProbability(mean.occupancy) stopifnotProbability(mean.detection) stopifNegative(sd.lp) # -------------------------------------------- # Create some data structures: observed data and 3 versions of p matrix y <- p <- p0 <- p1 <- array(NA, dim = c(M,J)) # Create data structures # Create 2 site covariates (elev, forest) and 1 obs. covar. 
(wind)
elev <- runif(n = M, -1, 1)                          # Scaled elevation
forest <- runif(n = M, -1, 1)                        # Scaled forest cover
wind <- array(runif(n = M*J, -1, 1), dim = c(M, J))  # Scaled wind speed

# Model for occurrence (presence/absence): simulate system state z
beta0 <- qlogis(mean.occupancy)           # Mean occurrence on link scale
psi <- plogis(beta0 + beta1*elev + beta2*forest + beta3*elev*forest)
z <- rbinom(n = M, size = 1, prob = psi)  # Realised occurrence (true state)
# Plots for system state are produced below, inside the show.plots block

# Model for observations: simulate observations y, given system state z
alpha0 <- qlogis(mean.detection)          # mean detection on link scale
gamma <- runif(J, time.effects[1], time.effects[2]) # (fixed) time effects
eps <- rnorm(M, 0, sd.lp)                 # Individual (random) effects

# Generate two full detection probability arrays
# p0: for no preceding capture, p1: for preceding capture event
for(j in 1:J){
  p0[,j] <- plogis(alpha0 + gamma[j] + alpha1*elev + alpha2*wind[,j] +
    alpha3*elev*wind[,j] + eps)       # p when not captured at occasion j-1
  p1[,j] <- plogis(alpha0 + gamma[j] + alpha1*elev + alpha2*wind[,j] +
    alpha3*elev*wind[,j] + eps + b)   # p when captured at occasion j-1
}

# Generate detection/nondetection data: the measurements of presence/absence
# For the first capture occasion (no behavioural response possible)
p[,1] <- p0[,1]   # Write the detection probability matrix p
y[,1] <- rbinom(n = M, size = z, prob = p0[,1])
# y[,1] <- rbinom(n = M, size = 1, prob = z * p0[,1])  # SAME

# Later capture occasions (potentially with contribution of b)
for (j in 2:J){
  for(i in 1:M){
    p[i,j] <- (1-y[i,(j-1)])*p0[i,j] + y[i,(j-1)] * p1[i,j] # which p?
    y[i,j] <- rbinom(n = 1, size = 1, prob = z[i] * p[i,j])
  }
}

# True and observed measures of 'distribution'
sumZ <- sum(z)                     # Total occurrence (all sites)
sumZ.obs <- sum(apply(y,1,max))    # Observed number of occ sites
psi.fs.true <- sum(z) / M          # True proportion of occ. sites in sample
psi.fs.obs <- mean(apply(y,1,max)) # Observed proportion of occ. sites in sample

if(show.plots){
  # Restore graphical settings on exit -------------------------
  oldpar <- par("mfrow", "cex.main", "cex.lab", "mar")
  oldAsk <- devAskNewPage(ask = dev.interactive(orNone=TRUE))
  on.exit({par(oldpar); devAskNewPage(oldAsk)})
  # ------------------------------------------------------------
  tryPlot <- try( {
    # Plots for system state
    par(mfrow = c(2, 2), cex.main = 1)
    curve(plogis(beta0 + beta1*x), -1, 1, col = "red", frame.plot = FALSE,
      ylim = c(0, 1), xlab = "Elevation", ylab = "psi", lwd = 2)
    plot(elev, psi, frame.plot = FALSE, ylim = c(0, 1), xlab = "Elevation", ylab = "")
    curve(plogis(beta0 + beta2*x), -1, 1, col = "red", frame.plot = FALSE,
      ylim = c(0, 1), xlab = "Forest cover", ylab = "psi", lwd = 2)
    plot(forest, psi, frame.plot = FALSE, ylim = c(0, 1), xlab = "Forest cover", ylab = "")

    # Plots for observation process
    par(mfrow = c(2, 3), cex.main = 1.2, cex.lab = 1.5, mar = c(5,5,3,2))
    # Plots for elevation, time, 'heterogeneity', and 'behavioural response'
    # Plots for elevation and time
    curve(plogis(alpha0 + alpha1*x), -1, 1, col = "red", frame.plot = FALSE,
      ylim = c(0, 1), xlab = "Elevation", ylab = "Expected detection (p)",
      lwd = 2, main = "Effects of elev and time")
    for(j in 1:J){
      curve(plogis(alpha0 + gamma[j] + alpha1*x),-1,1,lwd = 1, col="grey", add=TRUE)
    }
    # Plots for elevation and 'heterogeneity'
    curve(plogis(alpha0 + alpha1*x), -1, 1, col = "red", frame.plot = FALSE,
      ylim = c(0, 1), xlab = "Elevation", ylab = "Expected detection (p)",
      lwd = 2, main = "Effects of elev and site heterogeneity")
    for(i in 1:M){
      curve(plogis(alpha0 + eps[i] + alpha1*x),-1,1,lwd = 1, col="grey", add=TRUE)
    }
    curve(plogis(alpha0 + alpha1*x), -1, 1, col = "red", lwd = 2, add = TRUE)
    # Plot for elevation and 'behavioural response'
    p0plot <- p0
    p1plot <- p1 ; p1plot[,1] <- NA
    for(j in 2:J){
      p0plot[,j] <- p0plot[,j] / (1 - y[,(j-1)]) # blank out the non-applicable cases
      p1plot[,j] <- p1plot[,j] / y[,(j-1)]       # blank out the non-applicable cases
    }
    matplot(elev, p0plot, xlab = "Elevation", ylab = "Detection (p)",
      main = "p ~ elevation at actual wind speed \n(red/blue - following/not following det.)",
      pch = 1, ylim = c(0,1), col = "blue", frame.plot = FALSE)
    if(sum(is.finite(p1plot)) > 0)
      matplot(elev, p1plot, pch = 16, col = "red", add = TRUE)

    # Plots for wind speed, time, 'heterogeneity', and 'behavioural response'
    # Plots for wind speed and time
    curve(plogis(alpha0 + alpha2*x), -1, 1, col = "red", frame.plot = FALSE,
      ylim = c(0, 1), xlab = "Wind speed", ylab = "Expected detection (p)",
      lwd = 2, main = "Effects of wind and time")
    for(j in 1:J){
      curve(plogis(alpha0 + gamma[j] + alpha2*x),-1,1,lwd = 1, col="grey", add=TRUE)
    }
    # Plots for wind speed and 'heterogeneity'
    curve(plogis(alpha0 + alpha2*x), -1, 1, col = "red", frame.plot = FALSE,
      ylim = c(0, 1), xlab = "Wind speed", ylab = "Expected detection (p)",
      lwd = 2, main = "Effects of wind and site heterogeneity")
    for(i in 1:M){
      curve(plogis(alpha0 + eps[i] + alpha2*x),-1,1,lwd = 1, col="grey", add=TRUE)
    }
    curve(plogis(alpha0 + alpha2*x), -1, 1, col = "red", lwd = 2, add = TRUE)
    # Plot for wind speed and 'behavioural response'
    matplot(wind, p0plot, xlab = "Wind speed", ylab = "Detection (p)",
      main = "p ~ wind speed at actual elevation \n(red/blue - following/not following det.)",
      pch = 1, ylim = c(0,1), col = "blue", frame.plot = FALSE)
    if(sum(is.finite(p1plot)) > 0)
      matplot(wind, p1plot, pch = 16, col = "red", add = TRUE)
  }, silent = TRUE)
  if(inherits(tryPlot, "try-error"))
    tryPlotError(tryPlot)
}

# Output
return(list(M = M, J = J, mean.occupancy = mean.occupancy, beta0 = beta0,
beta1 = beta1, beta2 = beta2, beta3 = beta3, mean.detection = mean.detection, time.effects = time.effects, gamma = gamma, alpha0 = alpha0, alpha1 = alpha1, alpha2 = alpha2, alpha3 = alpha3, sd.lp = sd.lp, eps = eps, b = b, elev = elev, forest = forest, wind = wind, psi = psi, z = z, p = p, p0 = p0, p1 = p1, y = y, sumZ = sumZ, sumZ.obs = sumZ.obs, psi.fs.true = psi.fs.true, psi.fs.obs = psi.fs.obs)) }
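
# A minimal usage sketch for simOcc (added for illustration; argument values
# are arbitrary, not from the book). Wrapped in if(FALSE) so that nothing is
# executed when this file is sourced.
if(FALSE) {
  set.seed(42)
  tmp <- simOcc(M = 100, J = 3, mean.occupancy = 0.6, mean.detection = 0.3,
      show.plots = FALSE)
  str(tmp$y)  # 100 x 3 matrix of detection/nondetection data
  c(true = tmp$psi.fs.true, observed = tmp$psi.fs.obs) # finite-sample occupancy
}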
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/simOcc_AHM1_10-5_Simulate_static_occupancy.R
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM)
# Marc Kery & Andy Royle, Academic Press, 2016.

# simOccttd - AHM1 section 10.12.1 p616

# Function simulates time-to-detection occupancy design data under model
# of Garrard et al. (Austral Ecology, 2008), also see Bornand et al. (MEE, 2014)
# (introduced in AHM1 Section 10.12.1)

simOccttd <- function(M = 250, mean.psi = 0.4, mean.lambda = 0.3,
  beta1 = 1, alpha1 = -1, Tmax = 10, show.plot = TRUE, verbose = TRUE){
#
# Function simulates time-to-detection occupancy design data under model
# of Garrard et al. (Austral Ecology, 2008), also see Bornand et al. (MEE, 2014)
#
# Written by Marc Kery, 24 April 2015
#
# Function arguments:
#   M: Number of sites
#   mean.psi: intercept of occupancy probability
#   mean.lambda: intercept of Poisson rate parameter
#   beta1: slope of continuous covariate B on logit(psi)
#   alpha1: slope of continuous covariate A on log(lambda)
#   Tmax: maximum search time (in arbitrary units, which are same as response)
#     response will be censored at Tmax

# Checks and fixes for input data -----------------------------
M <- round(M[1])
stopifnotProbability(mean.psi)
stopifNegative(mean.lambda, allowZero=FALSE)
stopifNegative(Tmax, allowZero=FALSE)
# --------------------------------------------

# Generate covariate values
covA <- rnorm(M)
covB <- rnorm(M)

# Ecological process: Simulate occurrence z at each site
psi <- plogis(qlogis(mean.psi) + beta1 * covB)
(z <- rbinom(M, 1, psi))       # Realized occurrence at each site
if(verbose)
  cat(" Number of occupied sites (among", M, "):", sum(z), "\n")

# Observation process: Simulate time-to-detection (ttd) at each site
# Start without censoring
lambda <- exp(log(mean.lambda) + alpha1 * covA)
(ttd.temp <- rexp(M, lambda))  # ttd conditional on site occupied

# Now add two sources of censoring
ttd <- ttd.temp
ttd[z == 0] <- NA              # Censored if unoccupied
ttd[ttd.temp >= Tmax] <- NA    # Censored if ttd >= Tmax

if(show.plot) {
  tryPlot <- try( {
    hist(ttd, breaks = length(ttd)/3, col = "gold",
      main = "Observed distribution of time to detection\n(censored cases (red line) excluded)",
      xlim = c(0, Tmax), xlab = "Measured time to detection")
    abline(v = Tmax, col = "red", lwd = 3)
  }, silent = TRUE)
  if(inherits(tryPlot, "try-error"))
    tryPlotError(tryPlot)
}

# Number of sites where detected
(n.obs <- sum(ttd < Tmax, na.rm = TRUE))
if(verbose)
  cat(" Number of sites at which detected:", n.obs, "\n")

# Calculate censoring indicator
d <- as.numeric(is.na(ttd))
if(verbose)
  cat(" Number of times censored:", sum(d), "\n")

# Output
return(list(M = M, mean.psi = mean.psi, mean.lambda = mean.lambda,
  beta1 = beta1, alpha1 = alpha1, Tmax = Tmax, covA = covA, covB = covB,
  psi = psi, lambda = lambda, z = z, ttd.temp = ttd.temp, ttd = ttd, d = d,
  sum.z = sum(z), n.obs = n.obs))
}
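
# Illustrative call to simOccttd (added; values arbitrary), showing the two
# kinds of censoring in the output. Wrapped in if(FALSE) so it never runs.
if(FALSE) {
  set.seed(1)
  ttd.data <- simOccttd(M = 250, mean.psi = 0.4, mean.lambda = 0.3,
      Tmax = 10, show.plot = FALSE)
  table(censored = ttd.data$d)  # 1 = censored (site unoccupied or ttd >= Tmax)
  summary(ttd.data$ttd)         # observed times to detection, NA if censored
}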
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/simOccttd_AHM1_10-12-1_Simulate_timeToDetection_occupancy.R
## AHM2 section 1.8.1 simPH <- function( # --- Sample sizes and design stuff --- npop = 18, # Number of populations nyears = 17, # Number of years (seasons) nreps = 10, # Number of surveys per year (season) date.range = 1:150, # Dates over which surveys may be conducted # --- Parameters of among-year dynamics --- initial.lambda = 300, # Poisson mean of initial population size gamma.parms = c(0, 0.3), # mean and sd of lognormal interannual productivity # --- Parameters of within-year dynamics --- mu.range = c(50, 80), # Range of date of peak flight period # (varies by site and year) sigma.range = c(10, 20), # Range of sigma of normal phenology curve # (varies by year only) # --- Parameters of observation process --- p.range = c(0.4, 0.6), # Range of detection probabilities # (varies by site, year and visit) # --- Switch for plotting --- show.plot = TRUE) # whether to browse plots or not # (should be set to FALSE when running sims) { # -------------------- Start of function code ----------------- # Function generates (insect) counts under a variant of a # 'phenomenological model' of Dennis et al. (JABES 2016). # # Interannual population model is exponential population growth, # with Poisson initial abundance governed by initial.lambda and # annually varying growth rate (or productivity parameter) gamma # # Within-year dynamics is described by a Gaussian curve with date of # mean flight period mu (site- and year-specific) and # length of flight period sigma (only year-specific). # # Counts are made subject to a detection probability (p), which varies # randomly according to a uniform distribution for every single count. # # Counts are plotted for up to 16 populations only. # Checks and fixes for input data ----------------------------- npop <- round(npop[1]) nyears <- round(nyears[1]) nreps <- round(nreps[1]) stopifnotInteger(date.range) stopifNegative(initial.lambda, allowZero=FALSE) stopifnotLength(gamma.parms, 2) stopifnotProbability(p.range) # --------------------------------------------------------------- # Simulate among-year population dynamics: exponential model for n n <- array(NA, dim = c(npop, nyears)) # Array for site-year abundance n[,1] <- rpois(npop, initial.lambda) gamma <- rlnorm(nyears-1, meanlog=gamma.parms[1], sdlog=gamma.parms[2]) for(t in 2:nyears){ n[,t] <- rpois(npop, n[,t-1] * gamma[t-1]) } # Simulate within-year population dynamics: Normal curve for counts C <- date <- lambda <- a <- array(NA, dim = c(npop, nyears, nreps)) # Arrays for # site-year-visit counts, survey dates, relative pop. size and detection probability mu <- array(NA, dim = c(npop, nyears)) # Array for value of peak flight period date # Select survey dates, peak flight period (mu_it), # length of flight period sigma(t) and compute relative pop. 
size (a), # expected population size (lambda) and realized counts (C) # Draw annual value of flight period length (sigma) sigma <- runif(nyears, min(sigma.range), max(sigma.range)) # Draw values of detection probability (p) p <- array(runif(prod(c(npop, nyears, nreps)), min(p.range), max(p.range)), dim = c(npop, nyears, nreps)) # Compute and assemble stuff at the scale of the individual visit for(i in 1:npop){ for(t in 1:nyears){ # Survey dates for this yr and pop: survey.dates <- sort(round( runif(nreps, min(date.range), max(date.range)))) date[i,t,] <- survey.dates # Save these survey dates mu[i,t] <- runif(1, min(mu.range), max(mu.range)) # Flight peak for(k in 1:nreps){ # a[i,t,k] <- (1 / (sigma[t] * sqrt(2 * pi)) ) * exp( -((date[i,t,k] - mu[i,t])^2) / (2 * sigma[t]^2) ) # Rel. population size a[i,t,k] <- dnorm(date[i,t,k], mu[i,t], sigma[t]) # Rel. population size lambda[i,t,k] <- n[i,t] * a[i,t,k] * p[i,t,k] # Expected counts C[i,t,k] <- rpois(1, lambda[i,t,k]) # Realized counts } } } if(show.plot) { # Restore graphical settings on exit ------------------------- oldpar <- par("mfrow", "mar") oldAsk <- devAskNewPage(ask = dev.interactive(orNone=TRUE)) on.exit({par(oldpar); devAskNewPage(oldAsk)}) # ------------------------------------------------------------ # Simulate nice smooth normal curve for the plots nday <- length(date.range) aa <- ll <- array(NA, dim = c(npop, nyears, nday)) # Arrays pp <- array(runif(prod(c(npop, nyears, nday)), min(p.range), max(p.range)), dim = c(npop, nyears, nday)) for(i in 1:npop){ for(t in 1:nyears){ for(k in 1:nday){ aa[i,t,k] <- dnorm(date.range[k], mu[i,t], sigma[t]) # Relative population size ll[i,t,k] <- n[i,t] * aa[i,t,k] * pp[i,t,k] # Expected counts } } } # Graphical output tryPlot <- try( { # Plot population dynamics and plot of all population sizes par(mfrow = c(2,1), mar = c(5,4,3,1)) matplot(1:nyears, t(n), type = "l", lwd = 2, lty = 1, main = "Relative population size (n) for each population and year", ylab = "n", xlab = "Year", frame = FALSE, xaxt='n') tmp <- pretty(1:nyears) tmp[1] <- 1 axis(1, at=tmp) plot(table(n), xlab = 'Relative population size', ylab = 'Frequency', main = 'Frequency distribution of relative population size\nfor all sites and years', frame = FALSE) # Plot time-series of relative expected abundance for up to 16 populations par(mfrow = c(4,4), mar = c(5,4,3,1)) limit <- ifelse(npop < 17, npop, 16) for(i in 1:limit){ # Plot only for 4x4 populations matplot(date.range, t(aa[i,,]), type = "l", lty = 1, lwd = 2, ylim = c(0, max(aa[i,,])), xlab = "Date", ylab = "Rel. abundance", main = paste("Phenology in pop ", i, sep = ''), frame = FALSE) } # Plot time-series of relative expected abundance for up to 16 populations par(mfrow = c(4,4), mar = c(5,4,3,1)) limit <- ifelse(npop<17, npop, 16) for(i in 1:limit){ # Plot only for 4x4 populations matplot(date.range, t(ll[i,,]), type = "l", lty = 1, lwd = 2, ylim = c(0, max(ll[i,,])), xlab = "Date", ylab = "Exp. abundance", main = paste("Rel. exp. 
n in pop ", i, sep = ''), frame = FALSE) } # Plot time-series of counts (= relative, realized abundance) for up to 16 populations par(mfrow = c(4,4), mar = c(5,4,3,1)) limit <- ifelse(npop<17, npop, 16) for(i in 1:limit){ # Plot only for 4x4 populations matplot(t(date[i,,]), t(C[i,,]), type = "b", lty = 1, lwd = 2, ylim = c(0, max(C[i,,])), xlab = "Date", ylab = "Counts", main = paste("Pop ", i, "(mean n =", round(mean(n[i,])), ")"), frame = FALSE) } }, silent = TRUE) if(inherits(tryPlot, "try-error")) tryPlotError(tryPlot) } # Numerical output return(list( # ---------- arguments input -------------------------- npop = npop, nyears = nyears, nreps = nreps, date.range = date.range, initial.lambda = initial.lambda, gamma.parms = gamma.parms, mu.range = mu.range, sigma.range = sigma.range, p.range = p.range, # ------------ generated values ----------------------- # abundance gamma = gamma, # nyears-1 vector, change in abundance n = n, # site x year matrix, true abundance # phenology mu = mu, # site x year matrix, mean of the flight period sigma = sigma, # nyears vector, half-length of flight period # detection date = date, # site x year x nreps, dates of the surveys a = a, # site x year x nreps, phenology term lambda = lambda, # site x year x nreps, expected counts p = p, # site x year x nreps, probability of detection C = C)) # site x year x nreps, simulated counts } # -------------------- End of function definition -----------------
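
# Illustrative call to simPH (added; values arbitrary). The counts C form a
# site x year x visit array; gamma holds the nyears-1 interannual growth
# rates. Wrapped in if(FALSE) so it never runs when the file is sourced.
if(FALSE) {
  set.seed(123)
  ph <- simPH(npop = 18, nyears = 17, nreps = 10, show.plot = FALSE)
  str(ph$C)           # npop x nyears x nreps array of counts
  round(ph$gamma, 2)  # interannual productivity (length nyears - 1)
}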
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/simPH_AHM2_1-8-1.R
# AHM2 section 1.7.1 Simulation of a demographic state-space model # Called simpop in the draft # Define function to simulate the data # ----------------- Start function definition ------------------- simPOP <- function( M = 100, # number of sites T = 10, # number of years mean.lam = 3, # mean abundance for year 1 beta.lam = 0, # covariate coefficient for lambda sd.log.lam = 0, # over-dispersion in lambda mean.gamma = 1.0, # mean population growth rate beta.gamma = 0, # covariate coefficient for gamma sd.log.gamma.site = 0, # SD of site effects sd.log.gamma.time = 0, # SD of time effects sd.log.gamma.survey = 0, # SD of survey (site+time) effects sd.rho = 0, # random immigration term mean.p = 0.6, # mean detection probability beta.p = 0, # covariate coefficient for p sd.logit.p.site = 0, # SD of site effects sd.logit.p.time = 0, # SD of time effects sd.logit.p.survey = 0, # SD of survey effects show.plot = TRUE){ # controls plotting # Simulate multiple time-series of counts under a pure Markov model (with exponential population model) or # under an extended Markov model (with exponential-plus-random-immigration population model; # see Sollmann et al., Ecology, 2015) # Default is Markov model, setting sd.rho to a value greater than 0 changes to extended Markov and sets the amount of random immigration. # Checks and fixes for input data ----------------- M <- round(M[1]) T <- round(T[1]) stopifNegative(mean.lam) stopifNegative(sd.log.lam) stopifNegative(sd.log.gamma.site) stopifNegative(sd.log.gamma.time) stopifNegative(sd.log.gamma.survey) stopifNegative(sd.rho) stopifnotProbability(mean.p) stopifNegative(sd.logit.p.site) stopifNegative(sd.logit.p.time) stopifNegative(sd.logit.p.survey) # ----------------------------------------------------- # Create arrays needed (for observed counts, latent states, gamma, p # C <- N <- gamma <- p <- array(NA, dim = c(M, T)) # (Could also have this: gamma <- array(NA, dim = c(M, T-1))) Mike prefers this! 
C <- N <- p <- array(NA, dim = c(M, T)) gamma <- array(NA, dim = c(M, T-1)) # Assemble lambda Xsite1 <- runif(M, -1, 1) # Site covariate that affects initial abundance eps.N <- rnorm(M, 0, sd.log.lam) # Site overdispersion at t = 1 lambda <- exp(log(mean.lam) + beta.lam * Xsite1 + eps.N) # Assemble gamma (last column will remain NA) Xsiteyear1 <- matrix(runif(M*T, -1, 1), nrow = M, ncol = T) # Yearly site covariate that affects gamma eps.gamma.site <- rnorm(M, 0, sd.log.gamma.site) # spatial (= site) effects in gamma eps.gamma.time <- rnorm(T, 0, sd.log.gamma.time) # temporal (=primary occasion) effects in gamma eps.gamma.survey <- matrix(rnorm(M*T, 0, sd.log.gamma.survey), nrow = M, ncol = T) # survey effects in gamma (i.e., site by occasion) for(t in 1:(T-1)){ gamma[,t] <- exp(log(mean.gamma) + beta.gamma * Xsiteyear1[,t] + eps.gamma.site + eps.gamma.time[t] + eps.gamma.survey[,t]) } # Draw values of random immigration rate (rho) if(sd.rho == 0){ # Markovian dynamics rho <- rep(0, T-1) } if(sd.rho > 0){ # Extended Markovian dynamics logrho <- rnorm(T-1, 0, sd.rho) rho <- exp(logrho) } # Assemble p Xsiteyear2 <- matrix(runif(M*T, -1, 1), nrow = M, ncol = T) # Yearly site covariate that affects p eps.p.site <- rnorm(M, 0, sd.logit.p.site) # spatial (= site) effects in p eps.p.time <- rnorm(T, 0, sd.logit.p.time) # temporal (=primary occasion) effects in p eps.p.survey <- matrix(rnorm(M*T, 0, sd.logit.p.survey), nrow = M, ncol = T) # survey effects in p (i.e., site by occasion) for(t in 1:T){ p[,t] <- plogis(qlogis(mean.p) + beta.p * Xsiteyear2[,t] + eps.p.site + eps.p.time[t] + eps.p.survey[,t]) } # Simulate initial state N[,1] <- rpois(M, lambda) # Simulate later states for(t in 2:T){ N[,t] <- rpois(M, N[,t-1] * gamma[,t-1] + rho[t-1]) } # Simulate binomial observation for(t in 1:T){ C[,t] <- rbinom(M, N[,t], p[,t]) } # Tally up number of extinct populations and compute extinction rate Nextinct <- sum(N[,T] == 0) extrate <- Nextinct / M # Tally up number of years in which a pop is at zero zeroNyears <- sum(N == 0) # Add up annual total population size across all sites sumN <- apply(N, 2, sum) # Compute realized population growth rate based on total, realized population size per year gammaX <- numeric(T-1) for(t in 2:T){ gammaX[t-1] <- sumN[t] / sumN[t-1] } # Graphical output if(show.plot) { # Restore graphical settings on exit oldpar <- par("mfrow", "mar") oldAsk <- devAskNewPage(ask = dev.interactive(orNone=TRUE)) on.exit({par(oldpar); devAskNewPage(oldAsk)}) tryPlot <- try( { par(mfrow = c(1,3)) hist(lambda, breaks = 100, main = 'lambda', col = 'grey') hist(gamma, breaks = 100, main = 'gamma', col = 'grey') hist(p, breaks = 100, main = 'p', col = 'grey') par(mfrow = c(1, 3)) hist(N, breaks = 100, main = 'N', col = 'grey') hist(C, breaks = 100, main = 'C', col = 'grey') plot(N, C, xlab = 'True N', ylab = 'Observed C', frame = FALSE) abline(0,1) par(mfrow = c(2, 2)) ylim <- range(c(N, C)) matplot(t(N), type = 'l', lty = 1, main = 'Trajectories of true N', frame = FALSE, ylim = ylim) matplot(t(C), type = 'l', lty = 1, main = 'Trajectories of observed C', frame = FALSE, ylim = ylim) plot(table(N[,1]), main = 'Initial N', frame = FALSE) plot(table(N[,T]), main = 'Final N', frame = FALSE) }, silent = TRUE) if(inherits(tryPlot, "try-error")) tryPlotError(tryPlot) } # Numeric output return(list( # ----------------- arguments input ---------------------- M = M, T = T, mean.lam = mean.lam, beta.lam = beta.lam, sd.log.lam = sd.log.lam, mean.gamma = mean.gamma, beta.gamma = beta.gamma, 
sd.log.gamma.site = sd.log.gamma.site, sd.log.gamma.time = sd.log.gamma.time,
  sd.log.gamma.survey = sd.log.gamma.survey, sd.rho = sd.rho,
  mean.p = mean.p, beta.p = beta.p, sd.logit.p.site = sd.logit.p.site,
  sd.logit.p.time = sd.logit.p.time, sd.logit.p.survey = sd.logit.p.survey,
  # ------------------ generated values ----------------------
  Xsite1 = Xsite1,          # M vector, site covariate affecting initial abundance
  Xsiteyear1 = Xsiteyear1,  # MxT matrix, yearly site covariate affecting gamma
  Xsiteyear2 = Xsiteyear2,  # MxT matrix, yearly site covariate affecting p
  eps.N = eps.N,            # M vector, site overdispersion at t = 1
  lambda = lambda,          # M vector, abundance in year 1
  eps.gamma.site = eps.gamma.site,     # M vector, random site effect for gamma
  eps.gamma.time = eps.gamma.time,     # T vector, random time effect for gamma
  eps.gamma.survey = eps.gamma.survey, # MxT matrix, random survey effect for gamma
  gamma = gamma,            # M x (T-1) matrix, population growth rate
  rho = rho,                # T-1 vector, immigration rate
  eps.p.site = eps.p.site,  # M vector, random site effect for p
  eps.p.time = eps.p.time,  # T vector, random time effect for p
  eps.p.survey = eps.p.survey, # MxT matrix, random survey effect for p
  p = p,                    # MxT matrix, detection probability
  N = N,                    # MxT matrix, true population
  C = C,                    # MxT matrix, counts
  zeroNyears = zeroNyears,  # scalar, sum(N == 0)
  Nextinct = Nextinct,      # scalar, number of sites where N == 0 at time T
  extrate = extrate,        # scalar, proportion of sites where N == 0 at time T
  sumN = sumN,              # T vector, total population in each year
  gammaX = gammaX))         # T-1 vector, realized population growth rate
} # ----------------- End function definition -------------------
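
# Illustrative call to simPOP (added; values arbitrary): a non-zero sd.rho
# switches the dynamics from pure Markov to extended Markov with random
# immigration. Wrapped in if(FALSE) so it never runs.
if(FALSE) {
  set.seed(24)
  pop <- simPOP(M = 100, T = 10, mean.lam = 3, mean.gamma = 1.0,
      sd.rho = 0.2, mean.p = 0.6, show.plot = FALSE)
  pop$sumN             # total true population size per year
  round(pop$gammaX, 2) # realized annual growth rates
  pop$extrate          # proportion of sites with N == 0 in year T
}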
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/simPOP_AHM2_1-7-1.R
# AHM2 Chapter 10 simPPe <- function(lscape.size = 150, buffer.width = 25, variance.X = 1, theta.X = 10, M = 250, beta = 1, quads.along.side = 6, show.plots = TRUE) { # # Name means 'SIMulate Point Pattern Educational version' # Function to simulate spatial point pattern # in a heterogeneous landscape simulated on a square grid. # The study area ('core') is simulated inside of a larger landscape that # includes a buffer. The size of the core is defined by the lscape.size # minus twice the buffer. # There is one habitat covariate X that affects the intensity # of the points. # X is spatially structured with negative exp. spatial autocorrelation; # the parameters of the field can be chosen to create large # 'islands' of similar values or no 'islands' at all, # in which case the field is spatially unstructured. # # The intensity of STATIC points (e.g. home-range centers) # may be inhomogeneous and affected by the coefficient beta, # which is the log-linear effect of X. # # *** Function arguments *** # lscape.size: total side length of simulated landscape (length in units, # this includes a buffer) # buffer.width: width of buffer around core study area # variance.X: variance of Gaussian random field (covariate X) # theta.X: scale parameter of correlation in field (must be >0) # M: Expected number of activity centers in core area # beta: coefficient of the habitat # quads.along.side: number of quadrats along the side of the core area # (this is the parameter of the gridding process # and determines the size of the quadrats) # -------------- Check and fix input ----------------------- buffer.width <- round(buffer.width[1]) stopifNegative(buffer.width) quads.along.side <- round(quads.along.side[1]) stopifNegative(quads.along.side, allowZero=FALSE) lscape.size <- round(lscape.size[1]) stopifnotGreaterthan(lscape.size, 2 * buffer.width + quads.along.side - 1) variance.X <- variance.X[1] stopifNegative(variance.X) theta.X <- theta.X[1] stopifNegative(theta.X) M <- round(M[1]) stopifNegative(M) # ------------------------------------------------------------ # --------------- Define basic geometry of simulation -------------- # # Size of core study area (the 'core') and its proportion of total landscape area size.core <- lscape.size - 2 * buffer.width prop.core <- size.core^2/lscape.size^2 # ratio core area / total area # Discrete approximation of total landscape pixel.size <- 1 # length of side of square pixel of simulated landscape # Coordinates (mid-points of basic pixel unit of simulation) and lscape x <- seq(1, lscape.size, pixel.size)-0.5 # x coordinate of pixels y <- seq(1, lscape.size, pixel.size)-0.5 # y coordinate of pixels grid <- as.matrix(expand.grid(x,y)) # resulting grid # Compute lambda of point pattern: limit of expected number of points per areal unit, when latter goes towards zero lambda_pp <- M / size.core^2 # Define a core area in the middle of the square # This core is then divided up to define a number of quadrats # within which abundance and occurrence are measured quad.size <- size.core / quads.along.side breaks <- seq(buffer.width, size.core+buffer.width, by = quad.size) # boundaries of quadrats mid.pt <- breaks[-length(breaks)] + 0.5 * quad.size # quadrat mid-points core <- range(breaks) # range of x and y coordinates in the core nsite <- length(mid.pt)^2 # Simulate habitat covariate: a spatially correlated Gaussian random field (i.e., a Gaussian random field with negative exponential corr.) 
#if(requireNamespace("RandomFields", quietly=TRUE)) { # RandomFields::RFoptions(seed=NA) # field <- matrix(RandomFields::RFsimulate(RandomFields::RMexp(var = variance.X, scale = theta.X), # x=x, y=y, grid=TRUE)@data$variable1, ncol = lscape.size) # MVN r.v. with spatial correlation #} else { message("Using package 'fields' instead of 'RandomFields'; see help(simPPe).") obj <- circulantEmbeddingSetup(grid=list(x=x, y=y), Covariance="Exponential", aRange=theta.X) tmp <- try(circulantEmbedding(obj), silent=TRUE) if(inherits(tmp, "try-error")) stop("Simulation of random field failed.\nTry with smaller values for 'lscape.size' or 'theta.X'.") field <- matrix(tmp * sqrt(variance.X), ncol = lscape.size) #} # --------------- Simulate points in the field -------------------- # # Simulate binomial point process for activity centers M2 <- round(M/prop.core) # Number of individuals in the total landscape # (incl. buffer) # Simulate point locations as function of habitat covariate x probtemp <- exp(beta[1]*c(field)) # log-linear model for intensity on X probs <- probtemp / sum(probtemp) # normalize to get probability of getting a point in a pixel pixel.id <- sort(sample(1:lscape.size^2, M2 , replace=TRUE, prob=probs)) # Simulate locations randomly within the pixel (unlike sim.spatialDS) u1 <- grid[pixel.id,1] + runif(M2, -pixel.size/2, pixel.size /2) u2 <- grid[pixel.id,2] + runif(M2, -pixel.size/2, pixel.size /2) u <- cbind(u1, u2) # collect AC coordinates together # ------ Summarization of point pattern within quadrats ------------- # # This is INSIDE of the observation core # # Summarization for abundance (N) at every quadrat Nac <- as.matrix(table(cut(u[,1], breaks=breaks), cut(u[,2], breaks= breaks))) # quadrat-specific abundance for AC E_N <- round(mean(Nac),2) # average realized abundance per quadrat # Summarization for presence/absence (z) at every quadrat zac <- Nac ; zac[zac>1] <- 1 # quadrat-specific occurrence for AC E_z <- round(mean(zac), 2) # proportion occupied quadrats # ------------------ Visualizations --------------------------- if(show.plots) { oldpar <- par(mfrow = c(1, 3), mar = c(4,2,5,2), cex.main = 1.8, cex.axis = 1.2) ; on.exit(par(oldpar)) tryPlot <- try( { # *** Fig. 1: Original point pattern # Random field of X with activity-centers overlaid image(rasterFromXYZ(cbind(grid, c(field))), col=topo.colors(10), main = "Point pattern with\ncore and buffer area", xlab = "", ylab = "", axes = FALSE, asp = 1) mtext(paste("Mean intensity (lambda) =", round(lambda_pp, 5)), side=1) polygon(c(buffer.width, size.core+buffer.width, size.core+buffer.width, buffer.width), c(buffer.width, buffer.width, size.core+buffer.width, size.core+buffer.width), lwd = 2, lty = 1) points(u[,1], u[,2], pch=20, col='black', cex = 1.2) # plot points # points(u1, u2, pch=20, col='black', cex = 1.2) # plot points # *** Fig. 
2: Show abundance and presence/absence in each quadrat on original landscape *** # Covariate 1: the Gaussian random field with autocorrelation # Reproduce random field with activity centers image(rasterFromXYZ(cbind(grid, c(field))), col=topo.colors(10), main = "Abundance, N", xlab = "", ylab = "", axes = FALSE, asp = 1) mtext(paste0("Mean(N) = ", E_N, ", var(N) = ", round(var(c(Nac)), 2)), side=1) polygon(c(buffer.width, size.core+buffer.width, size.core+buffer.width, buffer.width), c(buffer.width, buffer.width, size.core+buffer.width, size.core+buffer.width), lwd = 2, lty = 1) # Add activity centers points(u[,1], u[,2], pch=20, col='black', cex = 1.2) # plot points # Overlay survey quadrats for(i in 1:length(breaks)){ for(k in 1:length(breaks)){ segments(breaks[i], breaks[k], rev(breaks)[i], breaks[k]) segments(breaks[i], breaks[k], breaks[i], rev(breaks)[k]) } } # Print abundance into each quadrat for(i in 1:length(mid.pt)){ for(k in 1:length(mid.pt)){ text(mid.pt[i], mid.pt[k], Nac[i,k], cex =4^(0.8-0.5*log10(quads.along.side)), col='red') } } # Figure 3 for presence/absence of activity centers (= distribution) # Reproduce random field with activity centers image(rasterFromXYZ(cbind(grid, c(field))), col=topo.colors(10), main = "Occurrence, z", xlab = "", ylab = "", axes = FALSE, asp = 1) mtext(paste("Mean(z) =", E_z), side=1) polygon(c(buffer.width, size.core+buffer.width, size.core+buffer.width, buffer.width), c(buffer.width, buffer.width, size.core+buffer.width, size.core+buffer.width), lwd = 2, lty = 1) # Add activity centers points(u[,1], u[,2], pch=20, col='black', cex = 1.2) # plot points # Overlay quadrats for(i in 1:length(breaks)){ for(k in 1:length(breaks)){ segments(breaks[i], breaks[k], rev(breaks)[i], breaks[k]) segments(breaks[i], breaks[k], breaks[i], rev(breaks)[k]) } } # Print presence/absence into each quadrat for(i in 1:length(mid.pt)){ for(k in 1:length(mid.pt)){ text(mid.pt[i], mid.pt[k], zac[i,k], cex =4^(0.8-0.5*log10(quads.along.side)), col='red') } } # Mike: Shade UNoccupied quadrats (which have abundance N = 0 or occurrence z = 0) for(i in 1:(length(breaks)-1)){ for(k in 1:(length(breaks)-1)){ if(zac[i,k] == 1) # grey-out UNoccupied quads next polygon(c(breaks[i], breaks[i+1], breaks[i+1], breaks[i]), c(breaks[k], breaks[k], breaks[k+1], breaks[k+1]), col = adjustcolor("black", 0.6)) } } }, silent = TRUE) if(inherits(tryPlot, "try-error")) tryPlotError(tryPlot) } # Numerical output return(list( # ----------------- arguments input ----------------------- grid.size = lscape.size, buffer.width = buffer.width, variance.X = variance.X, theta.X = theta.X, M = M, beta = beta, quads.along.side = quads.along.side, # ---------------- generated values ------------------------- core = core, # range of x and y coordinates in the 'core' M2 = M2, # Number of ACs in the total landscape (incl. buffer) grid = grid, # Coordinates of the centre of each pixel. pixel.size = pixel.size,# 1; length of side of square pixel of landscape size.core = size.core, # the width = height of the core area prop.core = prop.core, # proportion of the landscape in the core X = field, # lscape.size x lscape.size matrix of covariate values for each pixel probs = probs, # corresponding matrix of probability of AC in pixel (sums to 1) pixel.id = pixel.id, # M2 vector, which pixel each AC is inside. 
u = u,                  # M2 x 2 matrix, coordinates of each AC
  nsite = nsite,          # total number of quadrats in the core
  quad.size = quad.size,  # width = height of each quadrat
  breaks = breaks,        # boundaries of each quadrat
  mid.pt = mid.pt,        # mid-points of each quadrat
  lambda_pp = lambda_pp,  # intensity of point pattern (ACs per unit area)
  Nac = Nac,              # matrix, quads.along.side x quads.along.side, site-specific abundance of ACs
  zac = zac,              # matrix, quads.along.side x quads.along.side, 0/1 occurrence
  E_N = E_N,              # scalar, average realized abundance per quadrat
  E_z = E_z))             # scalar, average realized occupancy per quadrat
} # ------------ End of function definition --------------------
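
# Illustrative call to simPPe (added; values arbitrary), summarizing the
# point pattern as quadrat-level abundance and occupancy.
# Wrapped in if(FALSE) so it never runs.
if(FALSE) {
  set.seed(7)
  pp <- simPPe(lscape.size = 150, buffer.width = 25, M = 250, beta = 1,
      quads.along.side = 6, show.plots = FALSE)
  c(E_N = pp$E_N, E_z = pp$E_z) # mean abundance and occupancy per quadrat
  table(c(pp$Nac))              # frequency distribution of quadrat counts
}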
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/simPPe_AHM2_10.R
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM)
# Marc Kery & Andy Royle, Academic Press, 2016.

# sim.pdata - AHM1 section 8.2.5.1 p410

# Function to simulate non-hierarchical point transect (= point count) data
# (introduced in AHM1 Section 8.2.5.1)

sim.pdata <- function(N=1000, sigma=1, B=3, keep.all=FALSE, show.plot=TRUE) {
# Function simulates coordinates of individuals on a square
# Square is [0,2*B] x [0,2*B], with a count location on the center
# point (B,B)
# Function arguments:
#   N: total population size in the square
#   sigma: scale of half-normal detection function
#   B: circle radius
#   keep.all: return the data for y = 0 individuals or not

if(FALSE) x <- NULL # Kludge to keep R CMD check happy with curve

# Checks and fixes for input data -----------------------------
N <- round(N[1])
stopifNegative(sigma, allowZero=FALSE)
stopifNegative(B, allowZero=FALSE)
# --------------------------------------------

# Simulate and plot simulated data
u1 <- runif(N, 0, 2*B)   # (u1,u2) coordinates of N individuals
u2 <- runif(N, 0, 2*B)
d <- sqrt((u1 - B)^2 + (u2 - B)^2) # distance to center point of square
N.real <- sum(d <= B)    # Population size inside of count circle

# Can only count individuals in the circle, so set the detection probability
# of individuals in the corners to zero (thereby truncating them):
p <- ifelse(d < B, 1, 0) * exp(-d*d/(2*(sigma^2)))

# Now we decide whether each individual is detected or not
y <- rbinom(N, 1, p)

if(show.plot) {
  op <- par(mfrow = c(1,2)) ; on.exit(par(op))
  # Plot the detection function
  tryPlot <- try( {
    curve(exp(-x^2/(2*sigma^2)), 0, B, xlab="Distance (x)", ylab="Detection prob.",
      lwd = 2, main = "Detection function", ylim = c(0,1))
    text(0.8*B, 0.9, paste("sigma:", sigma))
    plot(u1, u2, asp = 1, pch = 1, main = "Point transect")
    points(u1[d <= B], u2[d <= B], pch = 16, col = "black")
    points(u1[y==1], u2[y==1], pch = 16, col = "blue")
    points(B, B, pch = "+", cex = 3, col = "red")
    plotrix::draw.circle(B, B, B)
  }, silent = TRUE)
  if(inherits(tryPlot, "try-error"))
    tryPlotError(tryPlot)
}

# Put all of the data in a matrix:
# (note we don't care about y, u, or v normally)
if(!keep.all){
  u1 <- u1[y==1]
  u2 <- u2[y==1]
  d <- d[y==1]
}
return(list(N=N, sigma=sigma, B=B, u1=u1, u2=u2, d=d, y=y, N.real=N.real))
}
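
# Illustrative call to sim.pdata (added; values arbitrary). With
# keep.all = FALSE, u1, u2 and d contain only the detected individuals.
# Wrapped in if(FALSE) so it never runs.
if(FALSE) {
  set.seed(5)
  pd <- sim.pdata(N = 1000, sigma = 1, B = 3, keep.all = FALSE,
      show.plot = FALSE)
  pd$N.real    # true number inside the circle of radius B
  length(pd$d) # number of detections; d = distances to the point
}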
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/simPdata_AHM1_8-2-5-1_Simulate_point_transect_data.R
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM)
# Marc Kery & Andy Royle, Academic Press, 2016.

# simpleNmix - AHM1 section 6.12 p298

# Function to generate Nmix data under a time-for-space substitution design
# (introduced in AHM1 Section 6.12)

# Define function to simulate such data
simpleNmix <- function(nyears = 12, nreps = 4, beta0 = 2, beta1 = 0.1,
  alpha0 = 0.5, alpha1 = -0.1, alpha2 = 1, show.plot = TRUE){
# Simple function simulates data under binomial N-mixture model where you have
# a single site that is surveyed over 'nyears' primary sampling periods
# ('seasons', 'years'), within which there are 'nreps' secondary samples each
# alpha0, alpha1 are the logit-linear coefficients of detection (p) on Time
# and on a survey-specific covariate such as temperature (temp).
# beta0 and beta1 are the log-linear coefficients of expected abundance
# (lambda) on Time.

if(FALSE) x <- NULL # Fix issues with 'curve'

# Checks and fixes for input data -----------------------------
nyears <- round(nyears[1])
nreps <- round(nreps[1])
# --------------------------------------------

Time <- 1:nyears
temp <- matrix(runif(nyears*nreps, -2, 2), ncol = nreps)
N <- rpois(n = nyears, lambda = exp(beta0 + beta1 * Time))
C <- array(NA, dim = c(nyears, nreps))
p <- plogis(alpha0 + alpha1*Time + alpha2*temp)
for(j in 1:nreps){
  C[,j] <- rbinom(n = nyears, size = N, prob = p[,j])
}

if(show.plot) {
  op <- par(mfrow = c(3, 2)) ; on.exit(par(op))
  curve(exp(beta0 + beta1 * x), 1, nyears,
    main = "Expected abundance (lambda) over time", frame = FALSE,
    lwd = 2, ylab = "lambda", xlab = "Time")
  plot(Time, N, main = "Realized abundance (N) over time", frame = FALSE)
  curve(plogis(alpha0 + alpha1 * x), 1, nyears, main = "p over time",
    frame = FALSE, lwd = 2, xlab = "Time", ylab = "p (at average temp)")
  matplot(Time, C, main = "Counts (C) over time", frame = FALSE)
  curve(plogis(alpha0 + alpha2 * x), -2, 2, main = "p vs. Temperature",
    frame = FALSE, lwd = 2, xlab = "Temperature", ylab = "p (at start of study)")
  matplot(temp, C, main = "Counts (C) vs. temperature", frame = FALSE)
}
return(list(nyears=nyears, nreps=nreps, beta0=beta0, beta1=beta1,
  alpha0=alpha0, alpha1=alpha1, alpha2=alpha2, N=N, C=C, Time=Time,
  temp = temp, p = p))
}
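
# Illustrative call to simpleNmix (added; values arbitrary), comparing true
# annual abundance with the best count. Wrapped in if(FALSE) so it never runs.
if(FALSE) {
  set.seed(16)
  nm <- simpleNmix(nyears = 12, nreps = 4, show.plot = FALSE)
  cbind(N = nm$N, maxC = apply(nm$C, 1, max)) # true N vs maximum count per year
}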
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/simPleNmix_AHM1_6-12_Simulate_Nmixture_data.R
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM)
# Marc Kery & Andy Royle, Academic Press, 2016.

# sim.spatialDS - AHM1 section 9.8.3 p534

# Function generates data under spatial hierarchical distance sampling model
# (introduced in AHM1 Section 9.8.3)

# Changes April/May 2017 (v. 0.1.3.9000/9001)
# Added lambda
# Added logit detection function, removed hazard
# Added useHabitat option to simulate new detection data with old Habitat values

sim.spatialDS <- function(N=1000, beta = 1, sigma=1, keep.all=FALSE, B=3,
  model=c("logit", "halfnorm"), lambda = B/3, useHabitat, show.plot=TRUE){
# Function simulates coordinates of individuals on a square
# Square is [0,2B] x [0,2B], with a count location on the point (B, B)
#   N: total population size in the square
#   beta: coefficient of the habitat covariate x on the log intensity of points
#   sigma: scale of half-normal detection function
#   keep.all: return the data for all individuals, whether detected or not

# Checks and fixes for input data -----------------------------
N <- round(N[1])
stopifNegative(sigma, allowZero=FALSE)
stopifNegative(B, allowZero=FALSE)
# --------------------------------------------
model <- match.arg(model)

# Create coordinates for 30 x 30 grid
delta <- (2*B-0)/30                       # '2D bin width'
grx <- seq(delta/2, 2*B - delta/2, delta) # mid-point coordinates
gr <- expand.grid(grx, grx, KEEP.OUT.ATTRS = FALSE) # Create grid coordinates

if(missing(useHabitat)) {
  # Create spatially correlated covariate x
  V <- exp(-e2dist(gr,gr)/lambda)
  x <- t(chol(V))%*%rnorm(900)
} else {
  x <- useHabitat$Habitat
  # Check input
  if(is.null(x) || is.null(dim(x)) || dim(x)[2] != 1 || dim(x)[1] != 900)
    stop("useHabitat is not valid output from sim.spatialDS.")
}

# Simulate point locations as function of habitat covariate x
probs <- exp(beta*x)/sum(exp(beta*x)) # probability of point in pixel (sum = 1)
pixel.id <- sample(1:900, N, replace=TRUE, prob=probs)
# could simulate randomly within the pixel but it won't matter so place centrally
u1 <- gr[pixel.id,1]
u2 <- gr[pixel.id,2]
d <- sqrt((u1 - B)^2 + (u2-B)^2)  # distance to center point of square
#plot(u1, u2, pch = 1, main = "Point transect")
N.real <- sum(d <= B)             # Population size inside of count circle

# Can only count individuals in the circle, so set to zero detection
# probability of individuals in the corners (thereby truncating them)
# p <- ifelse(d < B, 1, 0) * exp(-d*d/(2*(sigma^2)))
# We do away with the circle constraint here.
if(model=="halfnorm")
  p <- exp(-d*d/(2*sigma*sigma))
if(model=="logit")
  p <- 2*plogis( -d*d/(2*sigma*sigma) )

# Now we decide whether each individual is detected or not
y <- rbinom(N, 1, p)   # detected or not

if(show.plot) {
  op <- par(mar=c(3,3,3,6)) ; on.exit(par(op))
  tryPlot <- try( {
    image(rasterFromXYZ(cbind(as.matrix(gr),x)), col=topo.colors(10),
      asp=1, bty='n')  # need to convert gr to a matrix
    rect(0, 0, 2*B, 2*B)  # draw box around the image
    # draw.circle(B, B, B)
    points(B, B, pch="+", cex=3)
    image_scale(x, col=topo.colors(10))
    title("Extremely cool figure")  # express your appreciation of all this
    # points(u1[d <= B], u2[d <= B], pch = 16, col = "black", cex = 1)
    points(u1, u2, pch = 16, col = c("black", "red")[y+1]) # black = not detected, red = detected
    # points(u1[y==1], u2[y==1], pch = 16, col = "red", cex = 1) # detected
  }, silent = TRUE)
  if(inherits(tryPlot, "try-error"))
    tryPlotError(tryPlot)
}

# Remove data for individuals not detected
if(!keep.all){
  u1 <- u1[y==1]
  u2 <- u2[y==1]
  d <- d[y==1]
  pixel.id <- pixel.id[y==1]
}

# Output
return(list(model=model, N=N, beta=beta, B=B, u1=u1, u2=u2, d=d,
  pixel.id=pixel.id, y=y, N.real=N.real, Habitat=x, grid=gr))
}
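
# Illustrative calls to sim.spatialDS (added; values arbitrary). The second
# call re-uses the habitat from the first via useHabitat, giving a new
# detection data set on the same landscape. Wrapped in if(FALSE).
if(FALSE) {
  set.seed(145)
  ds <- sim.spatialDS(N = 1000, beta = 1, sigma = 1, B = 3,
      model = "logit", show.plot = FALSE)
  ds$N.real                                  # population size inside the circle
  head(cbind(d = ds$d, pixel = ds$pixel.id)) # detected individuals
  ds2 <- sim.spatialDS(N = 1000, useHabitat = ds, show.plot = FALSE)
}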
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/simSpatialDS_AHM1_9-8-3_Simulate_spatial_DS.R
# Function for AHM2 Chapter 11, section 11.5

simSpatialDSline <- function(N=1000, beta = 1, sigma=0.25, alpha0 = -2,
    W=1/2, L = 4, perp=FALSE, show.plots=TRUE){
  # N: total population size in the rectangle
  # beta: coefficient of the habitat covariate x on the log intensity of points
  # sigma: scale of half-normal detection function
  # W: truncation distance = strip half-width.

  # Create coordinates for the rasterized transect
  delta <- 0.1   # '2D bin width'
  # Following code creates coordinates in order of "raster"
  # (not necessary here but helps keep things organized)
  gry <- seq(delta/2, W*2 - delta/2, delta)
  ny <- length(gry)
  grx <- seq(delta/2, L - delta/2, delta)
  nx <- length(grx)
  grx <- rep(grx, ny)
  gry <- rev(sort(rep(gry,nx)))
  gr <- cbind(grx,gry)

  # Create spatially correlated covariate x and plot it
  V <- exp(-e2dist(gr,gr)/1)
  x <- t(chol(V))%*%rnorm(nrow(gr))
  r <- rasterFromXYZ(cbind(gr,x))

  # Simulate point locations as function of habitat covariate x
  probs <- exp(beta*x)/sum(exp(beta*x)) # probability of point in pixel (sum = 1)
  pixel.id <- sample(1:nrow(gr), N, replace=TRUE, prob=probs)
  # could simulate randomly within the pixel but it won't matter so place centrally
  u1 <- gr[pixel.id,1]
  u2 <- gr[pixel.id,2]
  # points(u1, u2, pch=20, col='black', cex = 1) # plot points ### some pixels have >1 animal
  # title("Transect HDS")

  line.pts <- seq(0.01, L-0.01, .02)
  d.to.trap <- trap.ind <- rep(NA, N)
  traps <- cbind(line.pts, 0.5) # trap locations, spaced at 0.02 all along the line

  # Put together first part of output list
  outbasic <- list(
    # --------- arguments entered -------------------
    N=N, beta=beta, sigma=sigma, alpha0=alpha0, W=W, L=L,
    # --------- values generated ---------------------
    delta=delta,  # distance between pixel centres (spatial resolution of grid)
    grid=gr,      # 2-column matrix with x/y coordinates of all pixels
    Habitat=x,    # value of habitat covariate for each pixel
    Habraster = r,# a raster with the habitat covariate
    u1=u1, u2=u2, # x and y coordinates of all animals in the population
    traps=traps)  # 2-column matrix of trap locations

  if(!perp){
    dmat <- e2dist(cbind(u1,u2), traps)
    pbar <- trap.ind
    for(i in 1:nrow(dmat)){ # nrow(dmat) == N, ie, loop over animals
      ## This needs to loop over "traps" and flip a coin until encounter!
      haz <- exp(alpha0)*exp( -(dmat[i,]^2)/(2*sigma*sigma))
      probs <- 1-exp(-haz)
      captured <- rbinom(nrow(traps), 1, probs)
      pbar[i] <- 1- exp(-sum(haz)) # prob that animal will be detected at least once.
      if(sum(captured)==0)  # not captured, NAs everywhere
        next
      trap.ind[i] <- which(captured == 1)[1] # point on the line where animal first detected
      d.to.trap[i] <- dmat[i, trap.ind[i]]
    }
    # trap.ind is the point on the line where the animal was first detected
    data <- cbind(trap.ind, d.to.trap, u1, u2)
    data <- data[!is.na(trap.ind),]     # remove animals not detected
    pixel <- pixel.id[!is.na(trap.ind)] # pixels of animals detected
    outextra <- list(
      data=data,   # matrix with rows for each animal captured and columns for
                   #   trap of first capture, distance to trap, x and y coordinates.
      pbar=pbar,   # probability that the animal is captured at least once.
      pixel=pixel) # pixel ID of animals captured
  } else {
    # simulate ordinary DS data
    dmat <- abs(u2 - 0.5)
    probs <- exp( -dmat*dmat/(2*sigma*sigma) )
    captured <- rbinom(N, 1, probs)
    data <- cbind(u1,u2)[captured==1,]
    outextra <- list(
      data=data, # a 2-column matrix with x and y coordinates of each animal captured.
      pixel = pixel.id[captured==1]) # pixel ID for each animal captured.
} if(show.plots) { oldpar <- par(mar=c(3,3,3,6), "mfrow") ; on.exit(par(oldpar)) tryPlot <- try( { image(r, col=topo.colors(10)) abline(0.5, 0, lwd=2) image_scale(x, col=topo.colors(10)) points(u1, u2, pch=20, col='black', cex = 1) # plot points ### some pixels have >1 animal title("Transect HDS") if(perp) { for(i in 1:N){ if(captured[i]==1) { points(u1[i], u2[i], col='red', cex=1.5) # circle captured animals lines(c(u1[i], u1[i]), c(u2[i], 0.5) ) # lines for those captured } } } else { for(i in 1:nrow(dmat)) lines(c(u1[i], traps[trap.ind[i],1]), c(u2[i], traps[trap.ind[i],2]) ) } }, silent = TRUE) if(inherits(tryPlot, "try-error")) tryPlotError(tryPlot) } return(c(outbasic, outextra)) }
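
# Illustrative calls to simSpatialDSline (added; values arbitrary):
# perp = FALSE gives trap-style detections along the line, perp = TRUE
# gives ordinary perpendicular-distance data. Wrapped in if(FALSE).
if(FALSE) {
  set.seed(11)
  str1 <- simSpatialDSline(N = 1000, beta = 1, sigma = 0.25, perp = FALSE,
      show.plots = FALSE)
  head(str1$data)  # trap of first detection, distance, and coordinates
  str2 <- simSpatialDSline(N = 1000, perp = TRUE, show.plots = FALSE)
  head(str2$data)  # x/y coordinates of the animals captured
}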
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/simSpatialDSline_AHM2_11-5.R
# Mizel, J.D., Schmidt, J.H., & Lindberg, M.S. (2018) Accommodating temporary emigration in spatial distance sampling models. Journal of Applied Ecology, 55, 1456-1464. # Appendix S1. Simulation and JAGS code for the TPP model. # Simulation code adapted from Kery and Royle (2016) ############################################################################### # Changes to mesh with AHMbook conventions: # name sim.spatialHDS.TE -> sim.spatialDSte # b1 -> beta, int.lam -> lam0, adj.sigma ~~> sigma, T -> nsurveys # argument delta removed (values other than 1 throw an error) simSpatialDSte <- function( nsites=28, # number of sites dim=10, # number of pixels along each side of the square site beta=1, # the effect of habitat on the number of individuals in a pixel lam0=2.5, # expected population size in the square nsurveys=4, # number of surveys sigma=3, # scale of half-normal detection function in pixels phi=0.6, # availability theta=2, # exponential correlation in the spatial covariate show.plots=3) { # Checks and fixes for input data ----------------------------- nsites <- round(nsites[1]) dim <- round(dim[1]) stopifNegative(lam0, allowZero=FALSE) nsurveys <- round(nsurveys[1]) stopifNegative(sigma, allowZero=FALSE) stopifnotProbability(phi) stopifNegative(theta, allowZero=FALSE) # -------------------------------------------- npixels <- dim * dim B <- dim/2 # sigma <- adj.sigma*B # Default adj.sigma is 0.6 x radius B # Create coordinates for npixels x npixels grid delta <- 1 grx <- seq(delta/2, 2*B - delta/2, delta) # mid-point coordinates gr <- expand.grid(grx,grx) # Create grid coordinates center <- matrix(B,nrow=1,ncol=2) tr <- cbind(rep(B,length(grx)),grx) d1 <- e2dist(tr,gr) d <- apply(d1,2,min) # V <- exp(-e2dist(gr,gr)/1) # V <- exp(-e2dist(gr,gr)/2) #### changed 2019-05-20 v.0.1.4.9063 V <- exp(-e2dist(gr,gr)/theta) #### changed 2019-05-20 v.0.1.4.9064 # Create spatially correlated covariate x and plot it beta0 <- log(lam0/npixels) # intercept of log(N) ~ beta0 + beta(Habitat) x <- probs <- array(NA,dim=c(npixels,nsites)) M <- rep(NA,nsites) for (j in 1:nsites){ # z <- t(chol(V))%*%rnorm(npixels) # x[,j]<- z x[,j] <- t(chol(V))%*%rnorm(npixels) # habitat covariate for this site M[j] <- rpois(1, sum(exp(beta0 + beta*x[,j]))) # number of individuals at site ?? 
} for (i in 1:npixels){ for (j in 1:nsites){ probs[i,j] <- exp(beta*x[i,j])/sum(exp(beta*x[,j])) # prob that animal at site j is at pixel i } } all.equal(colSums(probs), rep(1, nsites)) Mind <- max(M) superpop <- array(0, c(Mind, nsites)) for (j in 1:nsites){ # ifelse(M[j]>0, superpop[1:M[j],j] <-1, superpop[,j] <- 0) superpop[1:M[j], j] <- M[j] > 0 } all(colSums(superpop) == M) # Simulate individual locations for each survey # All individuals get locations, ignore temp emigration at this stage pixel.id <- array(NA, dim=c(Mind, nsites, nsurveys)) for (i in 1:Mind){ for (j in 1:nsites){ for (k in 1:nsurveys){ # pixel.id[i,j,k] <- sample(1:npixels, 1, replace=TRUE, prob=probs[,j]) pixel.id[i,j,k] <- sample.int(npixels, 1, prob=probs[,j]) } } } # y1 <- p <- array(NA,dim=c(Mind, nsites, nsurveys)) # p not used outside the loop y1 <- array(NA, dim=c(Mind, nsites, nsurveys)) # Simulate observations # temp emigration recognised here -> availability # p = real member of superpop x availability x detection function (half-normal) for (i in 1:Mind){ for (j in 1:nsites){ for(k in 1:nsurveys){ # p[i,j,k]<-superpop[i,j] * phi * # exp(-d[pixel.id[i,j,k]]*d[pixel.id[i,j,k]]/(2*(sigma^2))) # y1[i,j,k]<-rbinom(1, 1, p[i,j,k]) p <- superpop[i,j] * phi * exp(-d[pixel.id[i,j,k]]*d[pixel.id[i,j,k]]/(2*(sigma^2))) y1[i,j,k]<-rbinom(1, 1, p) } } } Counts <- apply(y1,2:3, sum, na.rm=TRUE) pixel.id[y1==0] <- 0 # zap pixel.id for animals not detected or not real individual # Re-shape individual data structure into counts in site x pixel x visit array y <- array(NA, dim = c(nsites, npixels, nsurveys), dimnames = list(NULL, c(1:npixels))) for(i in 1:nsites){ for (k in 1:nsurveys){ # y[i,,k] <- table(factor(paste(pixel.id[,i,k], sep = ""), levels = c(1:npixels))) y[i,,k] <- tabulate(pixel.id[,i,k], nbins=npixels) } } # Do some plots if(show.plots > 0) { show.plots <- min(show.plots, nsites) oldpar <- par(mar=c(1,1,3,1), oma=c(2,0,2,0), "mfrow") oldAsk <- devAskNewPage(ask = dev.interactive(orNone = TRUE)) on.exit({par(oldpar) ; devAskNewPage(oldAsk)}) for(i in 1:show.plots) { if(nsurveys < 3) par(mfrow = c(1,2)) if(nsurveys > 2) par(mfrow = c(2,2)) img <- rasterFromXYZ(cbind(gr, x[,i])) for(j in 1:min(nsurveys, 4)) { raster::plot(img, col=rampBYR(255), axes=FALSE, box=FALSE) title(main=paste("survey", j), line=0.2) points(gr[pixel.id[, i, j], , drop=FALSE], pch=16) segments(dim/2, 0, dim/2, 10, lwd=3, col='black') # The transect line } title(main=paste("Site", i, ": True population =", M[i]), cex.main=1.5, line=0, outer=TRUE) if(nsurveys > 4) mtext(paste(nsurveys - 4, "more surveys not shown"), side=1, outer=TRUE) } } return(list( # ---------- arguments supplied ----------- nsites=nsites, dim=dim, beta=beta, lam0=lam0, nsurveys=nsurveys, sigma=sigma, phi=phi, theta=theta, # ------------ values generated --------------------------- npixels=npixels, # number of pixels in each site (= dim^2) B=B, # distance from line to edge of square (= dim/2) M=M, # true number of individuals at each site d=d, # perpendicular distance of each pixel from the line Habitat=x, # pixels x sites, value of habitat covariate for each pixel y=y, # sites x pixels x surveys, number of animals detected Counts=Counts)) # sites x surveys, number of animals detected (summed over pixels) }
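
# Illustrative call to simSpatialDSte (added; values arbitrary). Setting
# show.plots = 0 suppresses all plotting. Wrapped in if(FALSE).
if(FALSE) {
  set.seed(8)
  te <- simSpatialDSte(nsites = 28, nsurveys = 4, phi = 0.6, show.plots = 0)
  te$M       # true number of individuals at each site
  te$Counts  # sites x surveys matrix of detections
  str(te$y)  # sites x pixels x surveys array of counts
}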
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/simSpatialDSte.R
# Code from Andy, 29 Dec 2016
# Mike reorganised the plotting commands, adding devAskNewPage and show.plots.
# Mike moved beta1 and npix to the arguments.

sim.spatialHDS <- function(lam0 = 4, sigma = 1.5, B = 3, nsites = 100,
    beta1 = 1, npix = 20, show.plots = 3){
# Function simulates coordinates of individuals on a square
# Square is [0,2B] x [0,2B], with a count location on the point (B,B)
#   lam0: expected population size in the square
#   sigma: scale of half-normal detection function
#   B: circle radius

# Checks and fixes for input data -----------------------------
stopifNegative(lam0, allowZero=FALSE)
stopifNegative(sigma, allowZero=FALSE)
stopifNegative(B, allowZero=FALSE)
nsites <- round(nsites[1])
npix <- round(npix[1])
# --------------------------------------------

if(show.plots > 0) {
  oldpar <- par(mar=c(3,3,3,6), "mfrow")
  oldAsk <- devAskNewPage(ask = dev.interactive(orNone = TRUE))
  on.exit({par(oldpar) ; devAskNewPage(oldAsk)})
}

# npix <- 20
data <- NULL
beta0 <- log(lam0/(npix*npix))
# beta1 <- 1
Z <- matrix(NA, nrow=npix*npix, ncol=nsites)
delta <- (2*B-0)/npix
grx <- seq(delta/2, 2*B - delta/2, delta)
gr <- expand.grid(grx,grx)
V <- exp(-e2dist(gr,gr)/1)
N <- rep(NA, nsites)
for(s in 1:nsites){
  z <- t(chol(V))%*%rnorm( npix^2 )
  Z[,s] <- z
  # Note Poisson assumption, which means the count in each pixel is also Poisson
  N[s] <- rpois(1, sum(exp( beta0 + beta1*Z[,s])))
  probs <- exp(beta1*Z[,s])/sum(exp(beta1*Z[,s]))
  pixel.id <- sample(1:(npix^2), N[s], replace=TRUE, prob=probs)
  # could simulate randomly within the pixel but it won't matter
  u1 <- gr[pixel.id,1]
  u2 <- gr[pixel.id,2]
  d <- sqrt((u1 - B)^2 + (u2-B)^2) # distance to center point of square
  p <- exp(-d*d/(2*sigma*sigma))
  # Now we decide whether each individual is detected or not
  y <- rbinom(N[s], 1, p)
  if(s <= show.plots) {
    tryPlot <- try( {
      img <- rasterFromXYZ(cbind(gr,z))
      image(img, col=topo.colors(10))
      # draw.circle(3,3,B)
      image_scale(z, col=topo.colors(10))
      points(u1, u2, pch=16, col='black')
      # points(u1[d<= B], u2[d<= B], pch = 16, col = "black")
      points(u1[y==1], u2[y==1], pch = 16, col = "red")
      points(B, B, pch = "+", cex = 3)
      # draw.circle(3, 3, B)
    }, silent = TRUE)
    if(inherits(tryPlot, "try-error")) {
      show.plots <- 0 # stop further plotting attempts
      tryPlotError(tryPlot)
    }
  }
  if(sum(y)>0) {
    data <- rbind(data, cbind(rep(s,length(u1)), u1=u1, u2=u2, d=d, y=y))
  } else {
    data <- rbind(data, c(s, NA, NA, NA, NA))
  }
}
dimnames(data) <- list(NULL, c("site","u1","u2","d","y"))
return(list(data=data, B=B, Habitat=Z, grid=gr, N=N, nsites=nsites))
}
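
# Illustrative call to sim.spatialHDS (added; values arbitrary). The 'data'
# matrix has one row per individual (detected or not) at each site.
# Wrapped in if(FALSE) so it never runs.
if(FALSE) {
  set.seed(3)
  hds <- sim.spatialHDS(lam0 = 4, sigma = 1.5, B = 3, nsites = 100,
      show.plots = 0)
  head(hds$data) # site, coordinates, distance to point, detected (y)
  sum(hds$N)     # total true population over all sites
}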
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/simSpatialHDS.R
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM) # Marc Kery & Andy Royle, Academic Press, 2016. # spline.prep - AHM1 section 10.14 p623 # Function to prepare input for BUGS model when fitting a spline for a covariate # (introduced in AHM1 Section 10.14) spline.prep <- function(cov, nknot = NA){ # Function chooses knots and creates design matrices for fixed and # random-effects parts of a spline model for a chosen covariate # Based on code by Crainiceanu et al. (2005) and Zuur et al. (2012) # Allows you to choose number of knots or else uses it by the rule # given in Crainiceanu et al. (2005) # Prepares fixed part of covariate as a quadratic polynomial # Determine number and position of knots # ifelse(is.na(nknot), # n.knots <- max(5, min(round(length(unique(cov))/4), 35)), # n.knots <- nknot) if(is.na(nknot)) { n.knots <- max(5, min(round(length(unique(cov))/4), 35)) } else { n.knots <- nknot } prob.tmp <- seq(0,1, length = n.knots + 2) prob <- prob.tmp[-c(1, length(prob.tmp))] knots <- quantile(unique(cov), probs = prob) # Create design matrices for fixed and random effects X <- cbind(rep(1, length(cov)), cov, cov^2) # Fixed-eff DM Z.tmp <- (abs(outer(cov, knots, "-")))^3 omega.all <- (abs(outer(knots, knots, "-")))^3 svd.omega.all <- svd(omega.all) sqrt.omega.all <- t(svd.omega.all$v %*% (t(svd.omega.all$u) * sqrt(svd.omega.all$d))) Z <- t(solve(sqrt.omega.all, t(Z.tmp))) # Rand. eff. DM # Output return(list(cov = cov, knots = knots, X = X, Z = Z)) }
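
# Illustrative use of spline.prep (added; values arbitrary): X and Z are the
# fixed- and random-effects design matrices to pass to the BUGS model.
# Wrapped in if(FALSE) so it never runs.
if(FALSE) {
  set.seed(1)
  covX <- runif(200, -2, 2)
  sp <- spline.prep(covX, nknot = 10)
  sp$knots  # 10 knots at quantiles of the unique covariate values
  dim(sp$X) # 200 x 3: intercept, cov, cov^2
  dim(sp$Z) # 200 x 10: one column per knot
}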
/scratch/gouwar.j/cran-all/cranData/AHMbook/R/spline-prep_AHM1_10-14_Prepare_input_spline.R
# Functions 'standardize' and 'standardize2match'

# Centre and scale a vector or array and return an object of the same class.
# For an array, the mean and SD of the whole array is used.

standardize <- function (x, center = TRUE, scale = TRUE) {
  if (!is.numeric(x))
    stop("'x' must be a numeric vector or array.", call. = FALSE)
  if (length(center) != 1)
    stop("'center' must be logical or numeric of length 1.", call. = FALSE)
  if (length(scale) != 1)
    stop("'scale' must be logical or numeric of length 1.", call. = FALSE)

  if (is.logical(center)) {
    if (center) {
      center <- mean(x, na.rm = TRUE)
      x <- x - center
    }
  } else {
    if (!is.numeric(center))
      stop("'center' must be numeric or logical.", call. = FALSE)
    x <- x - center
  }
  if (is.logical(scale)) {
    if (scale) {
      scale <- sd(x, na.rm=TRUE)
      x <- x / scale
    }
  } else {
    if (!is.numeric(scale))
      stop("'scale' must be numeric or logical.", call. = FALSE)
    x <- x / scale
  }
  return(x)
}
#........................................................................

# Standardize a new numeric object to the same mean and SD as
# existing output from 'standardize'
standardize2match <- function (x, y) {
  if (!is.numeric(x) || !is.numeric(y))
    stop("'x' and 'y' must be numeric vectors or arrays.", call. = FALSE)
  return((x - mean(y, na.rm=TRUE)) / sd(y, na.rm=TRUE))
}
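
# Illustrative use of standardize and standardize2match (added; values
# arbitrary). For arrays, the overall mean and SD are used.
# Wrapped in if(FALSE) so it never runs.
if(FALSE) {
  x <- matrix(rnorm(12, mean = 50, sd = 10), 3, 4)
  xs <- standardize(x)           # centre and scale by overall mean and SD
  round(c(mean(xs), sd(xs)), 10) # approximately 0 and 1
  xnew <- rnorm(5, 50, 10)
  standardize2match(xnew, x)     # put new values on the same scale as x
}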
# File: AHMbook/R/standardize.R
# Sanity checks
# Not exported.

# These functions are very basic but simplify input checks while giving
#   informative error messages.
# Without checks, users may get mysterious error messages, eg,
#   "is.na() applied to non-(list or vector) of type 'closure'".
# base::stopifnot() is better, but error messages are still abstruse, eg,
#   "p >= 0 & p <= 1 is not TRUE".

stopifnotNumeric <- function(arg, allowNA=FALSE) {
  name <- deparse(substitute(arg))
  if(allowNA && all(is.na(arg))) {
    # do nothing
  } else {
    if(!allowNA && any(is.na(arg)))
      stop("Argument '", name, "' must not contain NA or NaN.", call.=FALSE)
    if(!is.numeric(arg))
      stop("Argument '", name, "' must be numeric.", call.=FALSE)
  }
}

stopifnotEqualLength <- function(arg1, arg2) {
  name1 <- deparse(substitute(arg1))
  name2 <- deparse(substitute(arg2))
  if(length(arg1) != length(arg2))
    stop("Lengths of arguments '", name1, "' and '", name2, "' must be equal.", call.=FALSE)
}

stopifnotGreaterthan <- function(arg, value, allowNA=FALSE) {
  name <- deparse(substitute(arg))
  if(allowNA && all(is.na(arg))) {
    # do nothing
  } else {
    if(!allowNA && any(is.na(arg)))
      stop("Argument '", name, "' must not contain NA or NaN.", call.=FALSE)
    if(!is.numeric(arg))
      stop("Argument '", name, "' must be numeric.", call.=FALSE)
    if(any(arg <= value)) {
      if(allowNA) {
        stop("Argument '", name, "' must be greater than ", value, ", or NA.", call.=FALSE)
      } else {
        stop("Argument '", name, "' must be greater than ", value, ".", call.=FALSE)
      }
    }
  }
}

stopifnotLessthan <- function(arg, value, allowNA=FALSE) {
  name <- deparse(substitute(arg))
  if(allowNA && all(is.na(arg))) {
    # do nothing
  } else {
    if(!allowNA && any(is.na(arg)))
      stop("Argument '", name, "' must not contain NA or NaN.", call.=FALSE)
    if(!is.numeric(arg))
      stop("Argument '", name, "' must be numeric.", call.=FALSE)
    if(any(arg >= value)) {
      if(allowNA) {
        stop("Argument '", name, "' must be less than ", value, ", or NA.", call.=FALSE)
      } else {
        stop("Argument '", name, "' must be less than ", value, ".", call.=FALSE)
      }
    }
  }
}

stopifnotInteger <- function(arg, allowNA=FALSE) {
  name <- deparse(substitute(arg))
  if(allowNA && all(is.na(arg))) {
    # do nothing
  } else {
    if(!allowNA && any(is.na(arg)))
      stop("Argument '", name, "' must not contain NA or NaN.", call.=FALSE)
    if(!is.numeric(arg))
      stop("Argument '", name, "' must be numeric.", call.=FALSE)
    if(!all(arg %% 1 == 0)) {
      if(allowNA) {
        stop("Argument '", name, "' must be integer (whole number), or NA.", call.=FALSE)
      } else {
        stop("Argument '", name, "' must be integer (whole number).", call.=FALSE)
      }
    }
  }
}

stopifnotScalar <- function(arg, allowNA=FALSE) {
  name <- deparse(substitute(arg))
  if(length(arg) > 1)
    stop("Argument '", name, "' must be a single value.", call.=FALSE)
  if(allowNA && is.na(arg)) {
    # do nothing
  } else {
    if(!allowNA && is.na(arg))
      stop("Argument '", name, "' must not be NA or NaN.", call.=FALSE)
    if(!is.numeric(arg))
      stop("Argument '", name, "' must be numeric.", call.=FALSE)
  }
}

stopifnotLength <- function(arg, length, allow1=FALSE) {
  name <- deparse(substitute(arg))
  if(allow1 && length(arg) == 1) {
    # do nothing
  } else {
    if(length(arg) != length) {
      if(allow1) {
        stop("Argument '", name, "' must have length 1 or ", length, ".", call.=FALSE)
      } else {
        stop("Argument '", name, "' must have length ", length, ".", call.=FALSE)
      }
    }
  }
}

stopifnotProbability <- function(arg, allowNA=FALSE) {
  name <- deparse(substitute(arg))
  if(allowNA && all(is.na(arg))) {  # An all-NA vector is logical, but ok.
    # do nothing
  } else {
    if(!allowNA && any(is.na(arg)))
      stop("Argument '", name, "' must not contain NA or NaN.", call.=FALSE)
    if(!is.numeric(arg))
      stop("Argument '", name, "' must be numeric.", call.=FALSE)
    if(any(arg < 0 | arg > 1, na.rm=TRUE)) {
      if(allowNA) {
        stop("Argument '", name, "' must be a probability between 0 and 1, or NA.", call.=FALSE)
      } else {
        stop("Argument '", name, "' must be a probability between 0 and 1.", call.=FALSE)
      }
    }
  }
}

stopifnotBetween <- function(arg, min, max, allowNA=FALSE) {
  name <- deparse(substitute(arg))
  if(allowNA && all(is.na(arg))) {  # An all-NA vector is logical, but ok.
    # do nothing
  } else {
    if(!allowNA && any(is.na(arg)))
      stop("Argument '", name, "' must not contain NA or NaN.", call.=FALSE)
    if(!is.numeric(arg))
      stop("Argument '", name, "' must be numeric.", call.=FALSE)
    if(any(arg < min | arg > max, na.rm=TRUE)) {
      if(allowNA) {
        stop("Argument '", name, "' must be between ", min, " and ", max, ", or NA.", call.=FALSE)
      } else {
        stop("Argument '", name, "' must be between ", min, " and ", max, ".", call.=FALSE)
      }
    }
  }
}

stopifNegative <- function(arg, allowNA=FALSE, allowZero=TRUE) {
  name <- deparse(substitute(arg))
  if(allowNA && all(is.na(arg))) {  # An all-NA vector is logical, but ok.
    # do nothing
  } else {
    if(!allowNA && any(is.na(arg)))
      stop("Argument '", name, "' must not contain NA or NaN.", call.=FALSE)
    if(!is.numeric(arg))
      stop("Argument '", name, "' must be numeric.", call.=FALSE)
    if(allowZero) {
      if(any(arg < 0, na.rm=TRUE)) {
        if(allowNA) {
          stop("Argument '", name, "' must be non-negative, or NA.", call.=FALSE)
        } else {
          stop("Argument '", name, "' must be non-negative.", call.=FALSE)
        }
      }
    } else {
      if(any(arg <= 0, na.rm=TRUE)) {
        if(allowNA) {
          stop("Argument '", name, "' must be greater than 0, or NA.", call.=FALSE)
        } else {
          stop("Argument '", name, "' must be greater than 0.", call.=FALSE)
        }
      }
    }
  }
}
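
# --- Usage sketch (added illustration).
# The checkers use deparse(substitute(arg)), so the error message names the
# argument as the caller wrote it:
if(FALSE) {
  f <- function(p) { stopifnotProbability(p) ; p }
  f(0.5)        # ok
  try(f(1.5))   # Error: Argument 'p' must be a probability between 0 and 1.
}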
# File: AHMbook/R/stopifnot.R
# Function to deal with errors arising in plotting stuff with sim* functions.
# Plotting errors should not cause the 'sim*()' function to halt but should produce
#   a sensible warning.

# not exported

# 'tryError' is the output from a 'try' call wrapped around plotting code

tryPlotError <- function(tryError) {
  msg <- "Plotting of output failed"
  msg2 <- attr(tryError, "condition")$message
  if(!is.null(msg2)) {
    if(msg2 == "figure margins too large")
      msg2 <- "the plotting window is too small."
    msg <- paste(msg, msg2, sep="\n  ")
  }
  if(Sys.getenv("RSTUDIO") == "1")
    msg <- paste(msg, "Try calling 'dev.new()' before the 'sim*' function.", sep="\n  ")
  warning(msg, call. = FALSE)
}
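
# --- Added illustration of the calling pattern used throughout the sim* functions:
# plotting code is wrapped in try(), and failures are passed here so that a
# plotting problem produces a warning instead of aborting the simulation.
if(FALSE) {
  tryPlot <- try({
    plot(1:10)  # any plotting code
  }, silent = TRUE)
  if(inherits(tryPlot, "try-error"))
    tryPlotError(tryPlot)
}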
# File: AHMbook/R/tryPlotError.R
# AHMbook 18.5bis.2 p.52-55 in MS "Chapter_18_FINAL.docx".

# The function 'valid.data' appears in the supplementary materials (Appendix 2) of
# Chambert, T., Waddle, J.H., Miller, D.A.W., Walls, S.C., & Nichols, J.D. (2017)
#   A new framework for analysing automated acoustic species-detection data:
#   occupancy estimation and optimization of recordings post-processing.
#   Methods in Ecology and Evolution, 9, 560-570.
# 'valid_data' performs the same function, but with new code by Mike Meredith, 2019-03-22.

# Both implementations select detections to validate in a series of rounds until
#   the desired number, n.valid, has been reached. In the last round, not all
#   candidate sites can be included without exceeding n.valid; valid.data takes the
#   first sites in the list, valid_data takes a random sample.

### Arguments:
# N = vector of ALL detection counts (TP + FP)
# tp = vector of true positive (TP) detection counts
# n.valid = NUMBER of detections to be validated (if prop.valid=FALSE)
# n.valid = PROPORTION of detections to be validated (if prop.valid=TRUE)

# Returns a list with 2 components:
# n : the number of detections validated at each site
# k : the number of detections checked and found to be valid at each site

valid_data <- function(N, tp, n.valid, prop.valid=FALSE) {
  if(prop.valid){
    n.valid <- round(n.valid*sum(N))
  }
  # -------------- Check and fix input -----------------------
  stopifnotEqualLength(N, tp)
  stopifnot(all(N >= tp))
  n.valid <- round(n.valid[1])
  stopifNegative(n.valid)
  # ------------------------------------------------------------
  if(n.valid > sum(N))
    warning("n.valid is greater than the total number of detections\n",
        "ALL will be validated", call.=FALSE)
  if(n.valid >= sum(N))
    return(list(n=N, k=tp))

  nsites <- length(N)
  # We will treat detections 1:tp[i] as true, the rest false.
  # We will randomise the selection of detections to validate.
  #   (We do not need to randomise both.)
  # We will need to draw > 1 value per site, without drawing any twice,
  #   so we decide now on the (random) order of the draws for each site.
  # Each round of validation corresponds to one column of 'order'.
  order <- matrix(0, nrow=nsites, ncol=max(N))
  for(i in 1:nsites) {
    if(N[i] > 0)
      order[i, 1:N[i]] <- sample.int(N[i])
  }
  stopifnot(all(rowSums(order > 0) == N))  # check

  # Do successive rounds of validation until we have enough:
  n <- k <- numeric(nsites)   # vectors of zeros
  wanted <- n.valid - sum(n)  # how many do we need to check?
  for(i in 1:max(N)) {
    todo <- which(order[, i] > 0)  # which sites have i (or more) detections
    if(length(todo) > wanted)      # if more than we want, subsample
      todo <- sample(todo, size=wanted)
    n[todo] <- n[todo] + 1
    k[todo] <- k[todo] + (order[todo,i] <= tp[todo])  # these are the good detections
    wanted <- n.valid - sum(n)     # how many do we still need to check?
    if(wanted == 0)
      break
  }
  return(list(n=n, k=k))
}
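
# --- Usage sketch (added illustration with made-up counts).
if(FALSE) {
  set.seed(1)
  N <- rpois(20, 5)          # total detections per site (TP + FP)
  tp <- rbinom(20, N, 0.8)   # of which truly positive
  out <- valid_data(N, tp, n.valid = 30)
  sum(out$n)   # 30 detections checked in total
  out$k        # how many of the checked detections were true positives
}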
# File: AHMbook/R/valid_data.R
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM)
# Marc Kery & Andy Royle, Academic Press, 2016.

# wigglyOcc - AHM1 section 10.14 p622

# Function to generate a static occupancy data set with really wiggly covariate
#   relationships in occupancy and detection probability
# (introduced in AHM1 Section 10.14)

wigglyOcc <- function(seed = 1, show.plot = TRUE, verbose = TRUE){
  # Function simulates really wiggly static site-occupancy data
  #
  # seed is for the random number generator
  #
  # Choose sample sizes and seed for repeatability
  M <- 240         # Number of sites
  J <- 3           # Number of replicates
  set.seed(seed)   # Allow repeatability

  # Ecological process: Generation of latent occurrence state
  Xsite <- seq(-2, 2, length.out = M)
  psi <- c(seq(0.1, 0.9,,80), rep(0.9,,80), rep(0.3,,80))
  z <- rbinom(M, 1, psi)

  # Observation process: Generation of observed data
  # Put covariate Xsurvey and p in order
  Xsurvey <- seq(-2, 2,, M*J)
  p.bp <- c(0, 0.6, 0.2, 0.9, 0.2, 0, 0.2)   # "break points" for p model
  p.ordered <- c(seq(p.bp[1], p.bp[2],, 120), seq(p.bp[2], p.bp[3],, 120),
      seq(p.bp[3], p.bp[4],, 120), seq(p.bp[4], p.bp[5],, 120),
      seq(p.bp[5], p.bp[6],, 120), seq(p.bp[6], p.bp[7],, 120))
  x.index <- sample(1:length(Xsurvey))
  Xsurvey <- matrix(Xsurvey[x.index], M, J, byrow = FALSE)
  p <- matrix(p.ordered[x.index], M, J, byrow = FALSE)

  # Sample detection/nondetection data
  y <- array(dim = c(M, J))
  for(j in 1:J){
    y[,j] <- rbinom(M, 1, z * p[,j])
  }

  # Look at data and produce some summaries
  head(cbind(z = z, p = p, y = y))
  if(verbose) {
    cat(" True number of occupied sites:", sum(z), "\n")
    cat(" Observed number of occupied sites:", sum(apply(y,1,max)), "\n")
    cat(" Proportional underestimation of distribution:",
        round((sum(z)-sum(apply(y,1,max)))/sum(z), 2), "\n")
  }

  # Plot system (state and observation)
  if(show.plot) {
    op <- par(mfrow = c(1,2), cex.main = 0.8) ; on.exit(par(op))
    tryPlot <- try( {
      plot(Xsite, psi, main = "Occupancy probability (red) and \nrealized presence/absence (black circles)",
          type = "l", ylim = c(-0.1, 1.1), col = "red",
          xlab = "Site covariate (Xsite)", lwd = 2, frame = FALSE)
      points(Xsite, jitter(z, amount = 0.02))
      plot(Xsurvey[order(x.index)], p[order(x.index)], type = "l", col = "red",
          main = "Detection probability (red) and \nobserved data (black circles)",
          xlab = "Survey covariate (Xsurvey)", ylab = "p",
          ylim = c(-0.1,1.1), lwd = 2, frame = FALSE)
      points(Xsurvey, jitter(y, amount = 0.02))
    }, silent = TRUE)
    if(inherits(tryPlot, "try-error"))
      tryPlotError(tryPlot)
  }

  return(list(M = M, J = J, Xsite = Xsite, Xsurvey = Xsurvey, psi = psi, z = z,
      p = p, y = y, x.index = x.index, p.ordered = p.ordered))
}
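
# --- Usage sketch (added illustration).
if(FALSE) {
  dat <- wigglyOcc(seed = 1, show.plot = FALSE)
  str(dat$y)    # 240 x 3 detection/nondetection matrix
  mean(dat$z)   # true proportion of occupied sites
}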
# File: AHMbook/R/wigglyOcc_AHM1_10-14_Simulate_occupancy_wiggly_covars.R
# Generate starting values for survival analysis in JAGS or WinBUGS
# AHM2 section 3.2.3

zinit <- function(CH){
  CH <- round(as.matrix(CH))  # could also be a data frame
  # occasion of first capture
  f <- suppressWarnings(apply(CH, 1, function(x) min(which(x != 0))))
  zinit <- array(NA, dim = dim(CH))
  for(i in 1:nrow(CH)){
    if(f[i] >= ncol(CH))  # first captured on last occasion (or never!)
      next
    zinit[i, (f[i]+1):ncol(CH)] <- 1
  }
  return(zinit)
}
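
# --- Usage sketch (added illustration).
# zinit() leaves NA up to and including the occasion of first capture and
# puts 1 afterwards, suitable as initial values for the latent alive state z.
if(FALSE) {
  CH <- matrix(c(0,1,0,1,
                 1,0,0,0,
                 0,0,0,1), nrow = 3, byrow = TRUE)
  zinit(CH)
}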
# File: AHMbook/R/zinit.R
# 1. Define an R function to generate dynamic presence/absence systems with 'space' # Code to define a function for simulating data. # AHM2 - 9.6.1.1 #### Uses 'fields' instead of 'RandomFields' #### simDynoccSpatial <- function(side = 50, nyears = 10, nsurveys = 3, mean.psi1 = 0.4, beta.Xpsi1 = 0, range.phi = c(0.8, 0.8), beta.Xphi = 0, range.gamma = c(0.1, 0.1), beta.Xgamma = 0, range.p = c(0.4, 0.4), beta.Xp = 0, theta.XAC = 5000, beta.XAC = c(0, 0, 0, 0), beta.Xautolog = c(0, 0), trend.sd.site = c(0, 0), trend.sd.survey = c(0, 0), seed.XAC = NA, seed = NULL, show.plots= TRUE, ask.plot = TRUE, verbose=TRUE) { # # Written by Marc Kéry, 2014-2018 # # Function to simulate detection/nondetection data in a square area # under a very general dynamic site-occ model, including the # following effects: # (1) annual variation in the probabilities of patch persistence, # colonization and detection can be specified by the bounds of a # uniform distribution. # (2) one site-, site/year-, and site/year/rep-specific covariate # is allowed to affect the probabilities of occupancy # (beta.Xpsi1 for site-covariate), colonisation/persistence # (beta.Xgamma, beta.Xphi, for yearly site-covariate), and # detection (beta.Xp for observational covariate), respectively. # (3) a single, spatially structured covariate for habitat suitability # may affect all parameters via coefficient beta.XAC (for a # biologically reasonable way, choose coefficients with the same sign # for all 4 (mediated by underlying density). # That spatial covariate is simulated as a Gaussian random field # with negative exponential correlation function with # 'range parameter' theta.XAC # (4) autologistic effects (beta.Xautolog) in persistence and colonization # probability can be chosen, which fits a logistic regression of # these parameters on the proportion of occupied neighbouring cells # (in a queen's or 2nd order neighbourhood) during the previous time step # (5) Additional detection heterogeneity can be introduced # at the site- or the individual survey level, with the possibility of a # temporal trend in this heterogeneity. For instance, an annual trend in # detection heterogeneity at the site or the survey level is specified by # the value in the first and the last year. # Hence, trend.sd.site = c(0, 1) will result in a linear trend in # the magnitude of site heterogeneity in detection from 0 in the # first year to 1 in the last year. # # # Function arguments: # ------------------- # # *** Design of study and basic 'magnitude' of parameters *** # side – side length of square simulation area. Therefore, # the number of sites, or cells, M = side^2 # nsurveys – Number of replicate surveys within a 'season', year or primary period # nyears – Number of years (or 'seasons') # mean.psi1 – intercept of occupancy probability in year 1 # range.phi and range.gamma – bounds of uniform distribution from which # annual intercepts for persistence (phi) and colonisation (gamma) # are drawn # range.p – same for detection probability p # # # *** Covariates *** # beta.Xpsi1: coefficient of a site covariate in psi1 # beta.Xphi: coefficient of a site/year covariate in phi # beta.Xgamma: coefficient of a site/year covariate in gamma # beta.Xp: coefficient of a site/year/rep covariate in p # # # *** Parameters governing the spatial correlations *** # theta.XAC: 'range parameter' of a covariate with exponential # spatial correlation (i.e., a Gaussian random field is used as an # environmental covariate). 
NOTE: if you want to set to zero the effects # of this spatially autocorrelated variable, you CANNOT # set theta.XAC=0 because this breaks the function, # nor can you simply choose a very small value. # Instead you MUST set the elements of coefficients vector beta.XAC # to zero. # beta.XAC: vector of coefficients of that field for the 4 model params: # psi1, phi, gamma, and p (in that order) # beta.Xautolog – vector of coefficients of autologistic covariate # in the following order: persistence (phi), colonization (gamma). # Autocovariate is computed at every season as the proportion of # occupied cells in a queen's neighbourhood around each cell. # # # *** Detection heterogeneity *** # trend.sd.site: range of year-specific values of SD of Gaussian # random site effects in p: c(1,1) specifies constant value of 1 # for all years, while c(0,1) specifies linear increase over the years # from 0 to 1. # trend.sd.survey: range of year-specific values of standard deviation # of Gaussian random survey effects in p: specification as # for trend.sd.site # # *** Graphics control and other *** # seed – allows to 'fix' the simulation such that it becomes reproducible # ask.plot – if TRUE permits to browse through plots (otherwise if FALSE) if(FALSE) {x <- NULL; rm(x)} # Stops R CMD check choking on 'curve'. # Checks and fixes for input data ----------------------------- side <- round(side[1]) nyears <- round(nyears[1]) stopifnotGreaterthan(nyears, 1) nsurveys <- round(nsurveys[1]) stopifnotProbability(mean.psi1) stopifnotProbability(range.phi) # bounds stopifnotProbability(range.gamma) # bounds stopifnotProbability(range.p) # bounds stopifNegative(theta.XAC, allowZero=FALSE) stopifnotLength(beta.XAC, 4) stopifnotLength(beta.Xautolog, 2) stopifnotLength(trend.sd.site, 2) # trend stopifNegative(trend.sd.site) stopifnotLength(trend.sd.survey, 2) # trend stopifNegative(trend.sd.survey) # ---------------------------------------------------------------- # Restore graphical settings on exit ----------------------------- if(show.plots) { oldpar <- par("mfrow", "mar", "cex.main", "cex.lab", "cex.axis") oldAsk <- devAskNewPage(ask = ask.plot && dev.interactive(orNone=TRUE)) on.exit({par(oldpar); devAskNewPage(oldAsk)}) } # ---------------------------------------------------------------- # Create grid xcoord <- 1:side ycoord <- 1:side grid <- as.matrix(expand.grid(x=xcoord, y=ycoord)) M <- side^2 # Total number of cells or sites # Compute adjacency matrix for grid neigh <- spdep::dnearneigh(as.matrix(grid), d1 = 0, d2 = sqrt(2) * 1 + 0.01) winnb <- spdep::nb2WB(neigh) # Function to get CAR ingredients for BUGS nneigh <- winnb$num # number of neighbours amatrix <- spdep::nb2mat(neigh) amatrix[amatrix > 0] <- 1 # Neighbours get a 1, non-neighbours a 0 # Set up arrays needed site <- 1:M # Sites year <- 1:nyears # Years prob <- array(dim = c(side, side)) # p matrix psi <- muZ <- z <- array(dim = c(side, side, nyears)) # Occupancy, occurrence phi <- gamma <- array(NA, dim = c(side, side, (nyears-1))) # Survival, colonisation Xauto <- array(NA, dim = c(side, side, nyears)) # Autocovariate y <- p <- array(NA, dim = c(side, side, nsurveys, nyears)) # Det. 
histories and p # Create values of 1 spatially autocorrelated covariate XAC # Generate correlated random variables in a square if(requireNamespace("RandomFields", quietly=TRUE)) { RandomFields::RFoptions(seed=seed.XAC) # Default NA; 88 gives cool pattern XAC <- matrix(RandomFields::RFsimulate(RandomFields::RMexp(var = 1, scale = theta.XAC), x=xcoord, y=ycoord, grid=TRUE)@data$variable1, ncol = side, byrow = TRUE) # variance 1 if(!is.na(seed.XAC)) RandomFields::RFoptions(seed=NA) } else { message("Using package 'fields' instead of 'RandomFields'; see help(simDynoccSpatial).") if(!is.na(seed.XAC)) set.seed(seed.XAC) obj <- circulantEmbeddingSetup(grid=list(x=xcoord, y=ycoord), Covariance="Exponential", aRange=theta.XAC) tmp <- try(circulantEmbedding(obj), silent=TRUE) if(inherits(tmp, "try-error")) stop("Simulation of random field failed.\nTry with smaller values for 'side' or 'theta.XAC'.") XAC <- matrix(tmp, ncol = side, byrow = TRUE) } set.seed(seed=seed) # Default NULL; do this AFTER RFsimulate # Create four spatially unstructured covariates # Site covariate for psi1 Xpsi1 <- matrix(runif(M, -2, 2), ncol = side) # Yearly-site covariates for phi and gamma Xphi <- Xgamma <- array(runif(M*nyears, -2, 2), dim = c(side, side, nyears)) # Observational covariate for p Xp <- array(runif(M*nsurveys*nyears,-2,2), dim=c(side, side,nsurveys,nyears)) # Draw values of baseline levels of the main parameters # (i.e., draw year effects if any) mean.phi <- runif(n = nyears-1, min = min(range.phi), max = max(range.phi)) mean.gamma <- runif(n = nyears-1, min = min(range.gamma), max = max(range.gamma)) mean.p <- runif(n = nyears, min = min(range.p), max = max(range.p)) # (a) Simulate state process parameters: initial state (first year) psi[,,1] <- plogis(qlogis(mean.psi1) + beta.Xpsi1 * Xpsi1 + beta.XAC[1] * XAC) # psi1 # (b) Simulate state in first year z[,,1] <- rbinom(M, 1, psi[,,1]) # Initial occurrence state # Compute value of autocovariate after first year = proportion of neighbours occupied # first vectorize and then put into matrix again Xautovec <- amatrix %*% c(z[,,1]) Xauto[,,1] <- matrix(Xautovec/nneigh, ncol = side) # Put back in matrix by column again # Do the pre-loop plots # --------------------- if(show.plots) { tryPlot <- try( { # Plot effects of autocovariate on (year-specific) phi and gamma par(mfrow = c(1, 2)) curve(plogis(qlogis(mean.phi[1]) + beta.Xautolog[1] * x), 0, 1, main = "Persistence: \nphi ~ Year + Autocovariate", xlab = "Autocov. (prop. occupied neighb.)", ylab = "phi", ylim = c(0,1), frame = FALSE) for(k in 2:(nyears-1)){ curve(plogis(qlogis(mean.phi[k])+beta.Xautolog[1]*x),0,1,add=TRUE) } curve(plogis(qlogis(mean.gamma[1]) + beta.Xautolog[2] * x), 0, 1, main = "Colonization: \ngamma ~ Year + Autocovariate", xlab = "Autocovariate (prop. occupied neighb.)", ylab = "gamma", ylim = c(0,1), frame = FALSE) for(k in 2:(nyears-1)){ curve(plogis(qlogis(mean.gamma[k])+beta.Xautolog[2]*x),0,1,add=TRUE) } # Simulate true system dynamics par(mfrow = c(2,2), mar = c(5,4,5,2), cex.main = 1.3, cex.lab = 1.5, cex.axis = 1.2) # Plot random field covariate XAC # rows are in x, columns in y direction image(1:side, 1:side, XAC, col=topo.colors(100), main = paste("Gaussian random field XAC with \n neg. 
exponential correlation (range =", theta.XAC, ")"), xlab = 'x', ylab = 'y') image(1:side, 1:side, psi[,,1], col=topo.colors(100), main = paste("Initial occupancy probability"), xlab = 'x', ylab = 'y') image(1:side, 1:side, z[,,1], col=c("white", "black"), main = paste("Initial presence/absence (true system state z):\n black = occupied, white = unoccupied"), xlab = 'x', ylab = 'y') abline(h = 0:side+0.5, v = 0:side+0.5, col = "lightgrey") image(1:side, 1:side, Xauto[,,1], col=topo.colors(100), main = "Autocovariate between year 1 and year 2", xlab = 'x', ylab = 'y') }, silent = TRUE) if(inherits(tryPlot, "try-error")) { show.plots <- FALSE tryPlotError(tryPlot) } } # (c) Simulate state process parameters: time steps 2:nyears for(k in 2:nyears){ par(mfrow = c(2,2), mar = c(5,4,5,2), cex.main = 1.3, cex.lab = 1.5, cex.axis = 1.2) if(verbose) cat(paste("** Year", k, "**\n")) # Compute colonisation and extinction parameters and plot phi[,,k-1] <- plogis(qlogis(mean.phi[k-1]) + beta.Xphi * Xphi[,,k-1] + beta.XAC[2] * XAC + beta.Xautolog[1] * Xauto[,,k-1]) gamma[,,k-1] <- plogis(qlogis(mean.gamma[k-1]) + beta.Xgamma * Xgamma[,,k-1] + beta.XAC[3] * XAC + beta.Xautolog[2] * Xauto[,,k-1]) # Compute latent states and plot muZ[,,k] <- z[,,k-1]*phi[,,k-1] + (1-z[,,k-1])*gamma[,,k-1] z[,,k] <- rbinom(M, 1, muZ[,,k]) # Compute autocovariate and plot Xautovec <- amatrix %*% c(z[,,k]) Xauto[,,k] <- matrix(Xautovec/nneigh, ncol = side) # re-assemble by column # Do the in-loop plots # -------------------- if(show.plots) { tryPlot <- try( { image(1:side, 1:side, phi[,,k-1], col=topo.colors(100), main = paste("Persistence between year", k-1, "and year", k), xlab = 'x', ylab = 'y') image(1:side, 1:side, gamma[,,k-1], col=topo.colors(100), main = paste("Colonization between year", k-1, "and year", k), xlab = 'x', ylab = 'y') image(1:side, 1:side, z[,,k], col=c("white", "black"), main = paste('Presence/absence (z) in year', k, ':\n black = occupied, white = unoccupied'), xlab = 'x', ylab = 'y') abline(h = 0:side+0.5, v = 0:side+0.5, col = "lightgrey") image(1:side, 1:side, Xauto[,,k], col=topo.colors(100), main = paste("Autocovariate between year", k, "and year", k+1), xlab = 'x', ylab = 'y') }, silent = TRUE) if(inherits(tryPlot, "try-error")) { show.plots <- FALSE tryPlotError(tryPlot) } } } # (d) Observation process parameters # First choose values of annual SD of p random effects sd.site <- seq(from = trend.sd.site[1], to = trend.sd.site[2], length.out = nyears) sd.survey <- seq(from = trend.sd.survey[1], to = trend.sd.survey[2], length.out = nyears) for(k in 1:nyears){ # Site random effects eps1 <- matrix(rnorm(n = M, mean = 0, sd = sd.site[k]), ncol = side) # Survey random eff. 
eps2 <- rnorm(n = nsurveys, mean = 0, sd = sd.survey[k]) for(j in 1:nsurveys){ p[,,j,k] <- plogis(qlogis(mean.p[k]) + beta.Xp * Xp[,,j,k] + beta.XAC[4] * XAC + eps1[,] + eps2[j]) } } # Simulate actual observation process (also updating entire grid in one go) for(k in 1:nyears){ # Loop over years for(j in 1:nsurveys){ # Loop over replicates prob <- z[,,k] * p[,,j,k] # zero out p for unoccupied sites y[,,j,k] <- rbinom(M, 1, prob) # image(1:side, 1:side, y[,,j,k], main = paste("Year", k, "and rep", j)) # Look at clumped pattern in y } } # Derived quantities # Compute annual population occupancy for (k in 2:nyears){ psi[,,k] <- psi[,,k-1]*phi[,,k-1] + (1-psi[,,k-1])*gamma[,,k-1] } mean.psi <- apply(psi, 3, mean) # Average psi over sites # Compute true and observed number of occupied sites zobs <- apply(y, c(1,2,4), max) nocc <- apply(z, 3, sum) nocc.obs <- apply(zobs, 3, sum) # Do the post-loop plots # ---------------------- psi.app <- apply(apply(y, c(1,2,4), max), 3, mean) if(show.plots) { tryPlot <- try( { # (4) More plots comparing true states and observations # Plot realised and apparent occupancy par(mfrow = c(1,1)) plot(year, apply(z, 3, mean), type = "l", xlab = "Year", ylab = "Occupancy or Detection prob.", col = "red", xlim = c(0,nyears+1), ylim = c(0,1), lwd = 2, lty = 1, frame.plot = FALSE, las = 1) lines(year, mean.p, type = "l", col = "red", lwd = 2, lty = 2) lines(year, psi.app, type = "l", col = "black", lwd = 2) text(0.8*nyears, 0.1, labels = "red solid - true occupancy prob.\n red dashed - detection prob.\n black - observed proportion occupied", cex = 1) # Plots comparing true and observed latent states par(mfrow = c(2,2), mar = c(5,4,5,2), cex.main = 1.3, cex.lab = 1.5, cex.axis = 1.2) for(k in 1:nyears){ image(1:side, 1:side, z[,,k], col=c("white", "black"), main = paste('Presence/absence (z) in year', k), xlab = 'x', ylab = 'y') abline(h = 0:side+0.5, v = 0:side+0.5, col = "lightgrey") image(1:side, 1:side, zobs[,,k], col=c("white", "black"), main = paste('Ever_detected (zobs) in year', k), xlab = 'x', ylab = 'y') abline(h = 0:side+0.5, v = 0:side+0.5, col = "lightgrey") } }, silent = TRUE) if(inherits(tryPlot, "try-error")) tryPlotError(tryPlot) } # Compute values of naive autocovariate (observed prop. 
# of occupied neighbouring cells) Xautoobs <- array(NA, dim = dim(zobs)) for(k in 1:nyears){ # Loop over years for(i1 in 1:side){ # Loop over one side (hopefully X) for(i2 in 1:side){ # Loop over other side (hopefully Y) i1.start <- max(1,(i1-1)) i1.end <- min(side,(i1+1)) i2.start <- max(1,(i2-1)) i2.end <- min(side,(i2+1)) Xautoobs[i1,i2,k] <- (sum(zobs[i1.start:i1.end,i2.start:i2.end,k])- zobs[i1,i2,k]) / (length(zobs[i1.start:i1.end,i2.start:i2.end,k]) - 1) } } } out <- list( # ----------------- values input ----------------------- side=side, nyears=nyears, nsurveys=nsurveys, mean.psi1=mean.psi1, beta.Xpsi1=beta.Xpsi1, range.phi=range.phi, beta.Xphi=beta.Xphi, range.gamma=range.gamma, beta.Xgamma=beta.Xgamma, range.p=range.p, beta.Xp=beta.Xp, theta.XAC=theta.XAC, beta.XAC= beta.XAC, beta.Xautolog=beta.Xautolog, trend.sd.site=trend.sd.site, trend.sd.survey=trend.sd.survey, seed=seed, seed.XAC = seed.XAC, # ----------------- values generated -------------------- M=M, # total number of pixels in the study area grid=grid, # 2-column matrix, the x and y coordinates of the pixels amatrix = amatrix, # MxM matrix, [i,j] = 1 if cell i and j are neighbours, 0 otherwise Xpsi1 = Xpsi1, # side x side matrix, value of covariate affecting initial occupancy (psi1) Xphi = Xphi, # side x side x nyears array, value of covariate affecting persistence (phi) Xgamma = Xgamma, # side x side x nyears array, value of covariate affecting colonisation (gamma) Xp = Xp, # side x side x nsurveys x nyears array, value of covariate affecting detection (p) XAC=XAC, # side x side matrix, the spatially correlated covariate Xauto = Xauto, # side x side x nyears array, autocovariate, proportion of neighbouring cells occupied Xautoobs = Xautoobs,# side x side x nyears array, observed autocovariate, proportion of neighbouring cells where species detected sd.site=sd.site, # vector nyears, year-specific values of SD of Gaussian random site effects in p sd.survey=sd.survey,# vector nyears, year-specific values of SD of Gaussian random survey effects in p mean.phi=mean.phi, # vector nyears-1, year-specific intercept of persistence on probability scale mean.gamma=mean.gamma,# vector nyears-1, year-specific intercept of colonisation on probability scale mean.p=mean.p, # vector nyears, year-specific intercept of detection probability on probability scale psi=psi, # side x side x nyears array, probability of occupancy of cell mean.psi=mean.psi, # vector nyears, mean occupancy over all cells psi.app=psi.app, # vector nyears, apparent occupancy, proportion of cells where species detected z=z, # side x side x nyears array, true occupancy status of each cell in each year (1 if occupied) zobs=zobs, # side x side x nyears array, observed occupancy status of each cell in each year (1 if detected) nocc = nocc, # vector nyears, the true number of cells occupied each year nocc.obs = nocc.obs,# vector nyears, the number of cells where species detected each year phi=phi, # side x side x nyears-1 array, probability of persistence in each interval between years gamma=gamma, # side x side x nyears-1 array, probability of colonisation in each interval between years p=p, # side x side x nsurveys x nyears array, probability of detection y = y) # side x side x nsurveys x nyears array, detection history, 1 if species detected. # Add an unmarked data frame object out$umf <- conv2UM(out) return(out) } # ------------------------------------------------------------------
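
# --- Usage sketch (added illustration; a small, fast call with the spatial
# covariate effects switched off via beta.XAC = c(0, 0, 0, 0)).
# Requires either 'RandomFields' or 'fields' for the random field, and the
# AHMbook helper conv2UM() for the unmarked frame attached to the output.
if(FALSE) {
  str(simDynoccSpatial(side = 10, nyears = 3, nsurveys = 2,
      beta.XAC = c(0, 0, 0, 0), show.plots = FALSE), max.level = 1)
}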
# File: AHMbook/inst/RandomFieldsSupport/simDynoccSpatial.R
# Define function for simulating spatially correlated random field
# AHM2 - 9.2
# Called by functions `simNmixSpatial` and `simOccSpatial`.

# Modified to use package 'fields' if 'RandomFields' is not available.
# In DESCRIPTION file:
# - add 'fields' to Imports
# - move RandomFields from Imports to Suggests
# In NAMESPACE file:
# - comment out or delete importFrom("RandomFields", "RFoptions", "RFsimulate", "RMexp")
# - add: importFrom("fields", "circulantEmbeddingSetup", "circulantEmbedding")

# ------------ Start of function definition ----------------
simExpCorrRF <- function(variance = 1, theta = 1, size = 50, seed = NA, show.plots = TRUE){
  # Function creates Gaussian random field with negative
  #   exponential correlation and visualizes correlation and random field
  #
  # Function arguments:
  # theta: parameter governing spatial correlation (= 1/phi)
  #   ("large theta means long range of correlation")
  #   Note that RMexp is specified in terms of phi = 1/theta
  # variance: variance of field, set at 1
  # size: number of pixels in either direction
  # show.plots: if TRUE, plots of the data will be displayed;
  #   set to FALSE if you are running simulations or using this inside other functions.

  # Generate correlated random variables in a square
  step <- 1
  x <- seq(1, size, step)
  y <- seq(1, size, step)
  # grid <- as.matrix(expand.grid(x,y))
  grid <- cbind(x = rep(x, each=size), y = y)
  if(requireNamespace("RandomFields", quietly=TRUE)) {
    RandomFields::RFoptions(seed=seed)
    field <- RandomFields::RFsimulate(RandomFields::RMexp(var = variance, scale = theta),
        x=x, y=y, grid=TRUE)@data$variable1
    RandomFields::RFoptions(seed=NA)
  } else {
    message("Using package 'fields' instead of 'RandomFields'; see help(simExpCorrRF).")
    if(!is.na(seed))
      set.seed(seed)  # Only for compatibility with RandomFields; better to set the seed before calling simExpCorrRF
    obj <- circulantEmbeddingSetup(grid=list(x=x, y=y), Covariance="Exponential", aRange=theta)
    tmp <- try(circulantEmbedding(obj), silent=TRUE)
    if(inherits(tmp, "try-error"))
      stop("Simulation of random field failed.\nTry with smaller values for 'size' or 'theta'.")
    field <- as.vector(tmp * sqrt(variance))
  }

  # Plots
  # Correlation function
  if(show.plots){
    oldpar <- par(mfrow = c(1,2), mar = c(5,5,4,2), "cex.main") ; on.exit(par(oldpar))
    tryPlot <- try( {
      dis <- seq(0.01, 20, by = 0.01)
      corr <- exp(-dis/theta)
      plot(dis, corr, type = "l", xlab = "Distance", ylab = "Correlation",
          ylim = c(0,1), col = "blue", lwd = 2)
      text(0.8*max(dis), 0.8, labels = paste("theta:", theta))

      # Random field
      # image(x, y, field, col=topo.colors(20), main = paste("Gaussian random field with \n negative exponential correlation (theta =", theta, ")"), cex.main = 1)
      par(mar = c(3,2,5,1))
      raster::plot(rasterFromXYZ(cbind(grid, field)), col=topo.colors(20),
          main = paste("Gaussian random field with \n negative exponential correlation (theta =",
              theta, ")"), cex.main = 1, legend=FALSE, box=FALSE)
      box()
    }, silent = TRUE)
    if(inherits(tryPlot, "try-error"))
      tryPlotError(tryPlot)
  }

  # Output
  return(list(variance = variance, theta = theta, size = size, seed = seed,
      field = field, grid = grid))
}
# ------------ End of function definition ----------------
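
# --- Usage sketch (added illustration).
if(FALSE) {
  rf <- simExpCorrRF(variance = 1, theta = 5, size = 30, show.plots = FALSE)
  length(rf$field)   # 900 pixel values of the correlated field
}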
# File: AHMbook/inst/RandomFieldsSupport/simExpCorrRF.R
# AHM2 Chapter 10 simPPe <- function(lscape.size = 150, buffer.width = 25, variance.X = 1, theta.X = 10, M = 250, beta = 1, quads.along.side = 6, show.plots = TRUE) { # # Name means 'SIMulate Point Pattern Educational version' # Function to simulate spatial point pattern # in a heterogeneous landscape simulated on a square grid. # The study area ('core') is simulated inside of a larger landscape that # includes a buffer. The size of the core is defined by the lscape.size # minus twice the buffer. # There is one habitat covariate X that affects the intensity # of the points. # X is spatially structured with negative exp. spatial autocorrelation; # the parameters of the field can be chosen to create large # 'islands' of similar values or no 'islands' at all, # in which case the field is spatially unstructured. # # The intensity of STATIC points (e.g. home-range centers) # may be inhomogeneous and affected by the coefficient beta, # which is the log-linear effect of X. # # *** Function arguments *** # lscape.size: total side length of simulated landscape (length in units, # this includes a buffer) # buffer.width: width of buffer around core study area # variance.X: variance of Gaussian random field (covariate X) # theta.X: scale parameter of correlation in field (must be >0) # M: Expected number of activity centers in core area # beta: coefficient of the habitat # quads.along.side: number of quadrats along the side of the core area # (this is the parameter of the gridding process # and determines the size of the quadrats) # -------------- Check and fix input ----------------------- buffer.width <- round(buffer.width[1]) stopifNegative(buffer.width) quads.along.side <- round(quads.along.side[1]) stopifNegative(quads.along.side, allowZero=FALSE) lscape.size <- round(lscape.size[1]) stopifnotGreaterthan(lscape.size, 2 * buffer.width + quads.along.side - 1) variance.X <- variance.X[1] stopifNegative(variance.X) theta.X <- theta.X[1] stopifNegative(theta.X) M <- round(M[1]) stopifNegative(M) # ------------------------------------------------------------ # --------------- Define basic geometry of simulation -------------- # # Size of core study area (the 'core') and its proportion of total landscape area size.core <- lscape.size - 2 * buffer.width prop.core <- size.core^2/lscape.size^2 # ratio core area / total area # Discrete approximation of total landscape pixel.size <- 1 # length of side of square pixel of simulated landscape # Coordinates (mid-points of basic pixel unit of simulation) and lscape x <- seq(1, lscape.size, pixel.size)-0.5 # x coordinate of pixels y <- seq(1, lscape.size, pixel.size)-0.5 # y coordinate of pixels grid <- as.matrix(expand.grid(x,y)) # resulting grid # Compute lambda of point pattern: limit of expected number of points per areal unit, when latter goes towards zero lambda_pp <- M / size.core^2 # Define a core area in the middle of the square # This core is then divided up to define a number of quadrats # within which abundance and occurrence are measured quad.size <- size.core / quads.along.side breaks <- seq(buffer.width, size.core+buffer.width, by = quad.size) # boundaries of quadrats mid.pt <- breaks[-length(breaks)] + 0.5 * quad.size # quadrat mid-points core <- range(breaks) # range of x and y coordinates in the core nsite <- length(mid.pt)^2 # Simulate habitat covariate: a spatially correlated Gaussian random field (i.e., a Gaussian random field with negative exponential corr.) 
if(requireNamespace("RandomFields", quietly=TRUE)) { RandomFields::RFoptions(seed=NA) field <- matrix(RandomFields::RFsimulate(RandomFields::RMexp(var = variance.X, scale = theta.X), x=x, y=y, grid=TRUE)@data$variable1, ncol = lscape.size) # MVN r.v. with spatial correlation } else { message("Using package 'fields' instead of 'RandomFields'; see help(simPPe).") obj <- circulantEmbeddingSetup(grid=list(x=x, y=y), Covariance="Exponential", aRange=theta.X) tmp <- try(circulantEmbedding(obj), silent=TRUE) if(inherits(tmp, "try-error")) stop("Simulation of random field failed.\nTry with smaller values for 'lscape.size' or 'theta.X'.") field <- matrix(tmp * sqrt(variance.X), ncol = lscape.size) } # --------------- Simulate points in the field -------------------- # # Simulate binomial point process for activity centers M2 <- round(M/prop.core) # Number of individuals in the total landscape # (incl. buffer) # Simulate point locations as function of habitat covariate x probtemp <- exp(beta[1]*c(field)) # log-linear model for intensity on X probs <- probtemp / sum(probtemp) # normalize to get probability of getting a point in a pixel pixel.id <- sort(sample(1:lscape.size^2, M2 , replace=TRUE, prob=probs)) # Simulate locations randomly within the pixel (unlike sim.spatialDS) u1 <- grid[pixel.id,1] + runif(M2, -pixel.size/2, pixel.size /2) u2 <- grid[pixel.id,2] + runif(M2, -pixel.size/2, pixel.size /2) u <- cbind(u1, u2) # collect AC coordinates together # ------ Summarization of point pattern within quadrats ------------- # # This is INSIDE of the observation core # # Summarization for abundance (N) at every quadrat Nac <- as.matrix(table(cut(u[,1], breaks=breaks), cut(u[,2], breaks= breaks))) # quadrat-specific abundance for AC E_N <- round(mean(Nac),2) # average realized abundance per quadrat # Summarization for presence/absence (z) at every quadrat zac <- Nac ; zac[zac>1] <- 1 # quadrat-specific occurrence for AC E_z <- round(mean(zac), 2) # proportion occupied quadrats # ------------------ Visualizations --------------------------- if(show.plots) { oldpar <- par(mfrow = c(1, 3), mar = c(4,2,5,2), cex.main = 1.8, cex.axis = 1.2) ; on.exit(par(oldpar)) tryPlot <- try( { # *** Fig. 1: Original point pattern # Random field of X with activity-centers overlaid image(rasterFromXYZ(cbind(grid, c(field))), col=topo.colors(10), main = "Point pattern with\ncore and buffer area", xlab = "", ylab = "", axes = FALSE, asp = 1) mtext(paste("Mean intensity (lambda) =", round(lambda_pp, 5)), side=1) polygon(c(buffer.width, size.core+buffer.width, size.core+buffer.width, buffer.width), c(buffer.width, buffer.width, size.core+buffer.width, size.core+buffer.width), lwd = 2, lty = 1) points(u[,1], u[,2], pch=20, col='black', cex = 1.2) # plot points # points(u1, u2, pch=20, col='black', cex = 1.2) # plot points # *** Fig. 
2: Show abundance and presence/absence in each quadrat on original landscape *** # Covariate 1: the Gaussian random field with autocorrelation # Reproduce random field with activity centers image(rasterFromXYZ(cbind(grid, c(field))), col=topo.colors(10), main = "Abundance, N", xlab = "", ylab = "", axes = FALSE, asp = 1) mtext(paste0("Mean(N) = ", E_N, ", var(N) = ", round(var(c(Nac)), 2)), side=1) polygon(c(buffer.width, size.core+buffer.width, size.core+buffer.width, buffer.width), c(buffer.width, buffer.width, size.core+buffer.width, size.core+buffer.width), lwd = 2, lty = 1) # Add activity centers points(u[,1], u[,2], pch=20, col='black', cex = 1.2) # plot points # Overlay survey quadrats for(i in 1:length(breaks)){ for(k in 1:length(breaks)){ segments(breaks[i], breaks[k], rev(breaks)[i], breaks[k]) segments(breaks[i], breaks[k], breaks[i], rev(breaks)[k]) } } # Print abundance into each quadrat for(i in 1:length(mid.pt)){ for(k in 1:length(mid.pt)){ text(mid.pt[i], mid.pt[k], Nac[i,k], cex =4^(0.8-0.5*log10(quads.along.side)), col='red') } } # Figure 3 for presence/absence of activity centers (= distribution) # Reproduce random field with activity centers image(rasterFromXYZ(cbind(grid, c(field))), col=topo.colors(10), main = "Occurrence, z", xlab = "", ylab = "", axes = FALSE, asp = 1) mtext(paste("Mean(z) =", E_z), side=1) polygon(c(buffer.width, size.core+buffer.width, size.core+buffer.width, buffer.width), c(buffer.width, buffer.width, size.core+buffer.width, size.core+buffer.width), lwd = 2, lty = 1) # Add activity centers points(u[,1], u[,2], pch=20, col='black', cex = 1.2) # plot points # Overlay quadrats for(i in 1:length(breaks)){ for(k in 1:length(breaks)){ segments(breaks[i], breaks[k], rev(breaks)[i], breaks[k]) segments(breaks[i], breaks[k], breaks[i], rev(breaks)[k]) } } # Print presence/absence into each quadrat for(i in 1:length(mid.pt)){ for(k in 1:length(mid.pt)){ text(mid.pt[i], mid.pt[k], zac[i,k], cex =4^(0.8-0.5*log10(quads.along.side)), col='red') } } # Mike: Shade UNoccupied quadrats (which have abundance N = 0 or occurrence z = 0) for(i in 1:(length(breaks)-1)){ for(k in 1:(length(breaks)-1)){ if(zac[i,k] == 1) # grey-out UNoccupied quads next polygon(c(breaks[i], breaks[i+1], breaks[i+1], breaks[i]), c(breaks[k], breaks[k], breaks[k+1], breaks[k+1]), col = adjustcolor("black", 0.6)) } } }, silent = TRUE) if(inherits(tryPlot, "try-error")) tryPlotError(tryPlot) } # Numerical output return(list( # ----------------- arguments input ----------------------- grid.size = lscape.size, buffer.width = buffer.width, variance.X = variance.X, theta.X = theta.X, M = M, beta = beta, quads.along.side = quads.along.side, # ---------------- generated values ------------------------- core = core, # range of x and y coordinates in the 'core' M2 = M2, # Number of ACs in the total landscape (incl. buffer) grid = grid, # Coordinates of the centre of each pixel. pixel.size = pixel.size,# 1; length of side of square pixel of landscape size.core = size.core, # the width = height of the core area prop.core = prop.core, # proportion of the landscape in the core X = field, # lscape.size x lscape.size matrix of covariate values for each pixel probs = probs, # corresponding matrix of probability of AC in pixel (sums to 1) pixel.id = pixel.id, # M2 vector, which pixel each AC is inside. 
u = u, # M2 x 2 matrix, coordinates of each AC nsite = nsite, # total number of quadrats in the core quad.size = quad.size, # width = height of each quadrat breaks = breaks, # boundaries of each quadrat mid.pt = mid.pt, # mid=points of each quadrat lambda_pp = lambda_pp, # intensity of point pattern (ACs per unit area) Nac = Nac, # matrix, quads.along.side x quads.along.side, site-specific abundance of ACs zac = zac, # matrix, quads.along.side x quads.along.side, 0/1 occurrence E_N = E_N, # scalar, average realized abundance per quadrat. E_z = E_z)) # scalar, average realized occupancy per quadrat. } # ------------ End of function definition --------------------
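
# --- Usage sketch (added illustration).
if(FALSE) {
  pp <- simPPe(lscape.size = 60, buffer.width = 10, M = 100,
               quads.along.side = 5, show.plots = FALSE)
  pp$Nac   # quadrat-level abundance of activity centres
  pp$E_z   # realized proportion of occupied quadrats
}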
# File: AHMbook/inst/RandomFieldsSupport/simPPe.R
#' @importFrom methods setRefClass new
#' @importFrom reshape2 melt dcast
#' @importFrom graphics barplot text
#' @importFrom stats ave reorder sd
#' @docType package
#' @name ahpgaussian
#' @export
ahpgaussian <- function(x) UseMethod('ahpgaussian', x)
# File: AHPGaussian/R/ahpgaussian.R
#' @importFrom methods setRefClass new
#' @importFrom reshape2 melt dcast
#' @importFrom graphics barplot text
#' @importFrom stats ave reorder sd
#' @docType package
#' @name ahpgaussian.default
#' @export
ahpgaussian.default <- function(x) {
  stopifnot(is.data.frame(x))
  # Long format: the id columns are 'criteria' (first) and 'min_max' (last)
  x2 <- reshape2::melt(x, id.vars = c(1, dim(x)[2]), measure.vars = -c(1, dim(x)[2]))
  # Invert values for criteria to be minimised
  x2$value <- ifelse(x2$min_max == "min", 1 / x2$value, x2$value)
  x2 <- transform(x2, sum = ave(value, criteria, FUN = sum))
  x2 <- transform(x2, norm = value / sum)
  x2 <- transform(x2, mean = ave(norm, criteria, FUN = mean))
  x2 <- transform(x2, sd = ave(norm, criteria, FUN = sd))
  x2 <- transform(x2, factor = sd / mean)   # Gaussian factor: SD / mean of the normalised values

  x3 <- unique(x2[, c("criteria", "factor")])
  x3$factor <- x3$factor / sum(x3$factor)   # normalised criterion weights

  x4 <- x2[, c("criteria", "variable", "norm")]
  x4 <- reshape2::dcast(x4, formula = criteria ~ variable, value.var = "norm")
  x5 <- merge(x4, x3, by = "criteria")
  x5 <- reshape2::melt(x5, id.vars = c("criteria", "factor"), measure.vars = -c(1, dim(x)[2]))
  x5$value_factor <- x5$factor * x5$value
  x5 <- x5[, c("criteria", "variable", "value_factor")]
  x5$punctuation <- ave(x5$value_factor, x5$variable, FUN = sum)
  x5 <- x5[, c("variable", "punctuation")]
  x5$variable <- as.factor(x5$variable)
  x5$variable <- reorder(x5$variable, -x5$punctuation)
  x5 <- droplevels(unique(x5))
  x5 <- x5[order(x5$variable), ]
  x5$rank <- 1
  x5$rank <- ave(x5$rank, FUN = cumsum)

  # Bar plot of the final scores, labelled with the ranks
  text(x = barplot(x5$punctuation, names.arg = x5$variable, col = x5$variable,
          ylim = c(0, max(x5$punctuation) * 1.1)),
      y = x5$punctuation, labels = paste0(x5$rank), pos = 3, offset = 0.5)

  results <- list(table1 = x2, table2 = x3, table3 = x5)
  class(results) <- c('ahpgaussian', 'list')
  invisible(results)
}
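
# --- Usage sketch (added illustration; the input layout is an assumption
# inferred from the code above: first column 'criteria', one column per
# alternative, last column 'min_max' saying whether the criterion is to be
# minimised or maximised).
if(FALSE) {
  x <- data.frame(criteria = c("cost", "quality", "delivery"),
                  A = c(10, 8, 5), B = c(12, 9, 4), C = c(9, 7, 6),
                  min_max = c("min", "max", "max"))
  res <- ahpgaussian(x)
  res$table3   # final scores and ranking of the alternatives
}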
# File: AHPGaussian/R/ahpgaussian.default.R
utils::globalVariables(c("value", "criteria","new"))
# File: AHPGaussian/R/globals.R
#' @importFrom methods setRefClass new
#' @importFrom reshape2 melt dcast
#' @importFrom graphics barplot text
#' @importFrom stats ave reorder sd
#' @docType package
#' @name summary.ahpgaussian
#' @export
summary.ahpgaussian <- function(object, presentation = FALSE, ...) {
  if (!inherits(object, 'ahpgaussian'))
    stop("Use this function only with 'ahpgaussian' class!")
  if(!presentation){
    x <- list('Table1' = object$table1,
              'Table2' = object$table2,
              'Table3' = object$table3)
    class(x) <- c('summary.ahpgaussian', 'listof')
    x
  } else {
    cat(' Table 1:\t\t\t\t', object$table1)
    cat('\n - Table 2:\t\t\t\t', object$table2)
    cat('\n - Table 3:\t\t\t\t', object$table3)
    cat('\n')
  }
}
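
# --- Usage sketch (added illustration, following on from the
# ahpgaussian.default sketch; 'x' is the same hypothetical decision table).
if(FALSE) {
  res <- ahpgaussian(x)
  summary(res)   # prints Table1 (normalised matrix), Table2 (weights), Table3 (ranking)
}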
# File: AHPGaussian/R/summary.ahpgaussian.R
#' @keywords internal
"_PACKAGE"

## usethis namespace: start
## usethis namespace: end
NULL
# File: AHPWR/R/AHP-package.R
#'@title Calculates Saaty's consistency index
#'@name CI
#'@author Luciane Ferreira Alcoforado
#
#'@description Function to calculate Saaty's consistency index
#'
#'@param m is a matrix of pairwise comparisons
#'@return Returns Saaty's consistency index
#'@examples
#'x=c("c1", "c2", "c3", "c4")
#'y=c(3, 9, 2, 8)
#'m=matrix_ahp(x,y)
#'CI(m)
#'
#'@examples
#'x=c("a1", "a2", "a3", "a4", "a5")
#'y=c(1, 9, 1.5, 8, 6)
#'m=matrix_ahp(x,y)
#'CI(m)
#'
#'@examples
#'m=diag(16)+2-2*diag(16)
#'m
#'CI(m)
#'CR(m)
#'@export
CI = function(m){
  #IR = c(0,0,0.58,0.9,1.12,1.24,1.32,1.41,1.45, 1.49, 1.51, 1.48, 1.56, 1.57, 1.59)
  n = length(m[1,])
  lambdamax = max(Re(eigen(m)$values))
  IC = (lambdamax - n)/(n-1)   # CI = (lambda_max - n) / (n - 1)
  return(IC)
}
# File: AHPWR/R/CI.R
#'@title Calculates Saaty's consistency ratio
#'@name CR
#'@author Luciane Ferreira Alcoforado
#
#'@description Function to calculate Saaty's consistency ratio
#'
#'@param m is a matrix of pairwise comparisons
#'@return Returns Saaty's consistency ratio in [0,1]
#'@examples
#'x=c("c1", "c2", "c3", "c4")
#'y=c(3, 9, 2, 8)
#'m=matrix_ahp(x,y)
#'CR(m)
#'
#'@examples
#'x=c("a1", "a2", "a3", "a4", "a5")
#'y=c(1, 9, 1.5, 8, 6)
#'m=matrix_ahp(x,y)
#'CR(m)
#'
#'@examples
#'m=diag(16)+2-2*diag(16)
#'m
#'CI(m)
#'CR(m)
#'@export
CR = function(m){
  # Random consistency indices (RI) for matrix orders 1 to 15
  IR = c(0, 0, 0.58, 0.9, 1.12, 1.24, 1.32, 1.41, 1.45, 1.49, 1.51, 1.48, 1.56, 1.57, 1.59)
  n = length(m[1,])
  IC = CI(m)
  if(n > 15) n = 15
  RC = ifelse(n > 2, IC/IR[n], 0)   # CR = CI/RI; 1x1 and 2x2 matrices are always consistent
  return(RC)
}
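
# --- Added note and sketch: by Saaty's rule of thumb, a comparison matrix is
# considered acceptably consistent when CR < 0.10.
if(FALSE) {
  m <- matrix_ahp(c("c1", "c2", "c3"), c(3, 9, 2))   # as in the examples above
  CR(m) < 0.10   # TRUE if the matrix is acceptably consistent
}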
# File: AHPWR/R/CR.R
#'@title Calculates the AHP
#'@name ahp
#'@author Lyncoln Oliveira and Luciane Ferreira Alcoforado
#'@description Calculates AHP from a list of pairwise comparison matrices or from a properly formatted Excel spreadsheet.
#'@param base List of pairwise comparison matrices, or an Excel path containing the properly formatted pairwise comparison matrices.
#'@param mapeamento Vector containing the number of subcriteria of each criterion, from left to right. mapeamento = rep(0,n), n = number of criteria, when there are no subcriteria;
#'mapeamento = c(1,2) for one subcriterion under criterion 1 and two subcriteria under criterion 2. If in doubt, see the tutorial vignette.
#'@param nomes_alternativas Vector containing the names of the alternatives in your hierarchy;
#'if not filled, returns a vector of LETTERS.
#'
#'@return Table containing the relationships of criteria, subcriteria (if any) and alternatives using the AHP system.
#'@import tibble
#'@import dplyr
#'
#'@examples
#'x=paste0(letters[3],1:5)
#'y=c(5,2,7,3,2)
#'m1=matrix_ahp(x,y)
#'x=paste0(letters[1],1:3)
#'y=c(4.4,5.2,3)
#'m2=matrix_ahp(x,y)
#'y=c(2,4,3)
#'m3=matrix_ahp(x,y)
#'y=c(4.9,5,3.3)
#'m4=matrix_ahp(x,y)
#'y=c(4.4,4.2,4.3)
#'m5=matrix_ahp(x,y)
#'y=c(5.4,5.2,5.7)
#'m6=matrix_ahp(x,y)
#'base=list(m1, m2, m3, m4, m5, m6)
#'mapeamento = rep(0,5)
#'nomes_alternativas = paste0(letters[1],1:3)
#'ahp(base,mapeamento, nomes_alternativas)
#'
#'#with subcriteria: 3 criteria and 2 alternatives
#'mapeamento = c(2,0,0) #2 subcriteria in criteria 1 and 0 subcriteria for the others
#'x=paste0(letters[3],1:3) #3 criteria
#'y=c(5,2,7)
#'m1=matrix_ahp(x,y) #compare criteria
#'x=paste0(letters[4],1:2)
#'y=c(4,6)
#'m2=matrix_ahp(x,y) #compare 2 subcriteria of criteria 1
#'x=paste0(letters[1],1:2)
#'y=c(2,4)
#'m3=matrix_ahp(x,y) #alternatives for subcriteria 1
#'y=c(4.9,5)
#'m4=matrix_ahp(x,y) #alternatives for subcriteria 2
#'y=c(4.4,4.2)
#'m5=matrix_ahp(x,y) #alternatives for criteria 2
#'y=c(5.4,5.2)
#'m6=matrix_ahp(x,y) #alternatives for criteria 3
#'base=list(m1, m2, m3, m4, m5, m6)
#'
#'nomes_alternativas = paste0(letters[1],1:2)
#'ahp(base,mapeamento, nomes_alternativas)
#'
#'#Other mapeamento: criteria 2 with 2 subcriteria
#'
#'mapeamento = c(0,2,0)
#'nomes_alternativas = paste0(letters[1],1:2)
#'ahp(base,mapeamento, nomes_alternativas)
#'
#'@export

ahp = function(base, mapeamento, nomes_alternativas){
  preferencias = calcula_prioridades(base)
  objetivo = preferencias[1]
  criterios = preferencias[2:(length(mapeamento)+1)]
  names(criterios) = paste0("C", 1:length(mapeamento))
  alternativas = preferencias[(length(mapeamento) + 2):length(preferencias)]
  matriz_criterios = base[2:(length(mapeamento)+1)]
  matriz_alternativas = base[(length(mapeamento) + 2):length(base)]

  # Normalising: weight each criterion's priority vector by the goal-level weights
  criterios_normalizados = c()
  for(i in 1:length(mapeamento)){
    criterios_normalizados[[i]] = criterios[[i]] * objetivo[[1]][i]
  }
  names(criterios_normalizados) = names(criterios)

  # Generating the row names for the table
  nomes = c(paste0("---", "Alternatives"))
  CR_saaty = c(CR(base[[1]]))
  aux = 1  # tracks the position among the subcriteria matrices
  for(i in 1:length(mapeamento)){
    nomes = append(nomes, paste0("--", names(criterios[i])))
    CR_saaty = c(CR_saaty, CR(matriz_criterios[[i]]))
    for(j in 1:mapeamento[i]){
      if(mapeamento[i] == 0) break
      nomes = append(nomes, paste0("-", names(alternativas[aux])))
      CR_saaty = c(CR_saaty, CR(matriz_alternativas[[aux]]))
      aux = aux + 1
    }
  }

  # Generating the column of weights for criteria and subcriteria
  pesos = c(sum(objetivo[[1]]))
  for(i in 1:length(mapeamento)){
    pesos = append(pesos, objetivo[[1]][i])
    aux = 1
    for(j in 1:mapeamento[i]){
      if(mapeamento[i] == 0) break
      pesos = append(pesos, criterios_normalizados[[i]][aux])
      aux = aux + 1
    }
  }

  # First part of the table
  tabela = tibble::tibble(Criteria = nomes, Weights = pesos)

  # Organising the proportion of each criterion by alternative
  qtd_alternativas = length(nomes_alternativas)
  pesos_alternativas = vector("list", qtd_alternativas)
  names(pesos_alternativas) = nomes_alternativas

  aux = 1   # position among the alternative matrices
  aux2 = 1  # position being filled in the new list, ordered by row
  for(i in 1:length(mapeamento)){
    # If there are no subcriteria:
    if(mapeamento[[i]] == 0){
      for(j in 1:length(criterios_normalizados[[i]])){
        pesos_alternativas[[j]][aux2] = criterios_normalizados[[i]][j]
      }
      aux2 = aux2 + 1
    }
    # If there are subcriteria:
    else{
      for(j in 1:length(criterios_normalizados[[i]])){
        for(k in 1:length(alternativas[aux])){
          for(p in 1:qtd_alternativas){
            pesos_alternativas[[p]][aux2] = alternativas[[aux]][p] * criterios_normalizados[[i]][j]
          }
          aux2 = aux2 + 1
        }
        aux = aux + 1
      }
    }
  }

  # Now apply the sum of proportions of the criteria to the subcriteria
  pesos_alternativas_organizados = pesos_alternativas
  for(i in 1:qtd_alternativas){
    inferior = 2
    superior = 0
    pesos_alternativas_organizados[[i]] = c(sum(pesos_alternativas_organizados[[i]]),
        pesos_alternativas_organizados[[i]])
    vetor = c(pesos_alternativas_organizados[[i]][1])
    for(j in 1:length(mapeamento)){
      if(mapeamento[j] == 0){
        vetor = c(vetor, pesos_alternativas_organizados[[i]][inferior])
        inferior = inferior + 1
      } else{
        superior = inferior + mapeamento[j] - 1
        valor = sum(pesos_alternativas_organizados[[i]][inferior:superior])
        vetor = c(vetor, valor, pesos_alternativas_organizados[[i]][inferior:superior])
        inferior = superior + 1
      }
    }
    pesos_alternativas_organizados[[i]] = vetor
  }

  tabela = append(tabela, pesos_alternativas_organizados)
  tabela = append(tabela, list("CR" = CR_saaty))
  return(dplyr::as_tibble(tabela))
}
# File: AHPWR/R/ahp.R
#'@title Calculates the AHP General
#'@name ahp_geral
#'@author Lyncoln Oliveira
#'@description Calculates AHP from a list of pairwise comparison matrices or from a properly formatted Excel spreadsheet.
#'@param objeto List of pairwise comparison matrices, or an Excel path containing the properly formatted pairwise comparison matrices.
#'@param mapeamento Vector containing the number of subcriteria of each criterion, from left to right. If not filled, the default fills it with 0. If in doubt, see the tutorial vignette.
#'@param nomes_alternativas Vector containing the names of the alternatives in your hierarchy;
#'if not filled, returns a vector of LETTERS\[1\:qtdAlternatives\]
#'
#'@return Table containing the relationships of criteria, subcriteria (if any) and alternatives using the AHP system.
#'
#'
#'@examples
#'m1=matrix(c(1, 1/5, 3, 1/5, 1/3,5, 1, 5, 3, 3,
#' 1/3, 1/5, 1, 1/3, 1/3,5, 1/3, 3, 1, 1,3, 1/3,
#' 3, 1, 1),ncol=5,byrow=TRUE)
#'m2=matrix(c(1, 1/3, 1/6, 3, 1, 1/2,6, 2, 1),nrow=3, byrow=TRUE)
#'m3=matrix(c(1, 1/2, 1/2,2, 1, 2, 2, 1/2, 1),nrow=3, byrow=TRUE)
#'m4=matrix(c(1, 1, 2,1, 1, 1, 1/2, 1, 1),nrow=3, byrow=TRUE)
#'m5=matrix(c(1, 2, 3,1/2, 1, 2, 1/3, 1/2, 1),nrow=3, byrow=TRUE)
#'m6=matrix(c(1, 5, 3,1/5, 1, 1/3, 1/3, 3, 1),nrow=3, byrow=TRUE)
#'base=list(m1,m2,m3,m4,m5,m6)
#'mapeamento=rep(0,5)
#'nomes_alternativas="PADRAO"
#'ahp_geral(base,mapeamento, nomes_alternativas)
#'@export
#'
#'
ahp_geral = function(objeto, mapeamento = "PADRAO", nomes_alternativas = "PADRAO"){
  if(is.character(objeto)) base = ler(objeto) else base = objeto
  if(mapeamento[1] == "PADRAO") mapeamento = rep(0, dim(base[[1]])[1])
  if(nomes_alternativas[[1]] == "PADRAO") nomes_alternativas = LETTERS[1:dim(base[[length(base)]])[1]]

  tabela = ahp(base, mapeamento, nomes_alternativas)
  #tabela1 = ahp_s(base, map=mapeamento, nomes_alternativas)
  # result= list(tb1=tabela, tb2=tabela1)
  return(tabela)
}
/scratch/gouwar.j/cran-all/cranData/AHPWR/R/ahp_geral.R
#'@title Calculates the AHP for criteria and subcriteria
#'@name ahp_s
#'@author Luciane Ferreira Alcoforado
#'@description Calculates AHP for a list of paired matrices or for a properly formatted Excel workbook.
#'@param base List of paired matrices, or the path to an Excel file containing the properly formatted paired matrices.
#'@param map Vector containing the number of subcriteria of each criterion, from left to right. map = rep(0,n): n criteria and no subcriteria;
#'map = c(1,2) for one subcriterion under criterion 1 and two subcriteria under criterion 2. If in doubt, see the tutorial vignette.
#'
#'@return Table containing the weights of criteria, subcriteria (if any) and alternatives using the AHP method.
#'@import tibble
#'@import dplyr
#'
#'@examples
#'x=paste0(letters[3],1:5)
#'y=c(5,2,7,3,2)
#'m1=matrix_ahp(x,y)
#'x=paste0(letters[1],1:3)
#'y=c(4.4,5.2,3)
#'m2=matrix_ahp(x,y)
#'y=c(2,4,3)
#'m3=matrix_ahp(x,y)
#'y=c(4.9,5,3.3)
#'m4=matrix_ahp(x,y)
#'y=c(4.4,4.2,4.3)
#'m5=matrix_ahp(x,y)
#'y=c(5.4,5.2,5.7)
#'m6=matrix_ahp(x,y)
#'base=list(m1, m2, m3, m4, m5, m6)
#'map = rep(0,5)
#'ahp_s(base,map)
#'
#'#with two subcriteria under criterion 1 and 2 alternatives
#'map = c(2,0,0)
#'x=paste0(letters[3],1:3) #3 criteria
#'y=c(5,2,7)
#'m1=matrix_ahp(x,y) # matrix comparing the three criteria
#'x=paste0("SC1",1:2)
#'y=c(4,6)
#'m2=matrix_ahp(x,y) # matrix comparing the two subcriteria of criterion 1
#'x=paste0(letters[1],1:2)
#'y=c(2,4)
#'m3=matrix_ahp(x,y) #alternatives for subcriterion 1
#'y=c(4.9,5)
#'m4=matrix_ahp(x,y) #alternatives for subcriterion 2
#'y=c(4.4,4.2)
#'m5=matrix_ahp(x,y) #alternatives for criterion 2
#'y=c(5.4,5.2)
#'m6=matrix_ahp(x,y) #alternatives for criterion 3
#'base=list(m1, m2, m3, m4, m5, m6)
#'
#'
#'ahp_s(base,map)
#'
#'#Another map: 2 criteria, each with 2 subcriteria, and 3 alternatives
#'
#'
#'map = c(2,2)
#'x=paste0(letters[3],1:2) #2 criteria
#'y=c(5,7)
#'m1=matrix_ahp(x,y) # matrix comparing the two criteria
#'x=paste0("SC1",1:2)
#'y=c(4,6)
#'m2=matrix_ahp(x,y) # matrix comparing the two subcriteria of criterion 1
#'x=paste0(letters[1],1:3)
#'y=c(2,4,5)
#'m3=matrix_ahp(x,y) #alternatives for subcriterion 1 - criterion 1
#'y=c(4.9,5, 2)
#'m4=matrix_ahp(x,y) #alternatives for subcriterion 2 - criterion 1
#'y=c(4.4,8, 6)
#'x=paste0("SC2",1:2)
#'m5=matrix_ahp(x,y) #matrix comparing the two subcriteria of criterion 2
#'y=c(5.4,5.2, 1)
#'x=paste0(letters[1],1:3)
#'m6=matrix_ahp(x,y) #alternatives for subcriterion 1 - criterion 2
#'y=c(9,5.2, 3)
#'m7=matrix_ahp(x,y) #alternatives for subcriterion 2 - criterion 2
#'base=list(m1, m2, m3, m4, m5, m6, m7)
#'
#'ahp_s(base,map)
#'
#'@export
ahp_s = function(base, map){
  n = ncol(base[[1]])  # number of criteria
  m = dim(base[[length(base)]])[1]  # number of alternatives, assuming the last matrix compares alternatives in light of a criterion/subcriterion
  k = sum(map)  # number of subcriteria; map[i] is the number of subcriteria of criterion i
  x = sum(map) + length(map[map == 0])  # number of matrices comparing alternatives
  y = length(map[map != 0])  # number of matrices comparing subcriteria
  if (length(base) != (x + y + 1)) stop("The number of comparison matrices does not match!")
  if (length(map) != n) stop("The map length does not match the number of criteria")
  #if (sum(map) == 0){ see Lyncoln's code, or skip this logic of splitting cases }
  # (the idea was to handle the no-subcriteria case separately from the subcriteria case)
  preferencias = calcula_prioridades(base); preferencias  # priority vectors according to map
  objetivo = preferencias[1]; objetivo  # criteria weights P(C)
  peso_criterio = matrix(nrow = n, ncol = m)
  peso_subcriterio = matrix(nrow = sum(map), ncol = m)
  # we still need a sequence for preferencias[] picking only the criteria
  # name the criteria
  rownames(peso_criterio) = paste0("C", 1:n)
  # name the alternatives
  colnames(peso_criterio) = paste0("A", 1:m)
  # name the subcriteria of each criterion, if any
  if(sum(map > 0)){
    seq = NULL
    mapc = map[map > 0]
    for(i in seq_along(mapc)){
      seq = append(seq, 1:mapc[i])
    }
    # subcriteria index sequence: runs 1 to n1, then 1 to n2, and so on
    rownames(peso_subcriterio) = paste0(rep(paste0("SC", which(map > 0)), map[which(map > 0)]), seq)
    colnames(peso_subcriterio) = paste0("A", 1:m)
  }
  aux = 2
  # build the weight matrices of criteria and subcriteria separately
  auxlinha = 1  # controls the row of the peso_subcriterio matrix
  for(k in seq_along(map)){
    if (map[k] == 0) {
      peso_criterio[k,] = preferencias[[aux]]
      aux = aux + 1
      auxlinha = k + 1
    }
    if (map[k] > 0) {
      peso_parcial_critk = matrix(nrow = map[k], ncol = m)  # map[k] = number of subcriteria
      for(l in (1:map[k])){
        peso_parcial_critk[l,] = preferencias[[aux]][l] * preferencias[[(aux + l)]]
      }
      peso_subcriterio[auxlinha:(auxlinha + l - 1),] = peso_parcial_critk
      auxlinha = auxlinha + l
      aux = aux + l + 1
      peso_criterio[k,] = colSums(peso_parcial_critk)
    }
  }
  if(sum(map) == 0){
    peso_alternativas = t(matrix(unlist(preferencias[2:(n + 1)]), ncol = n) %*% preferencias[[1]])
  }
  if(sum(map) > 0){
    peso_alternativas = preferencias[[1]] %*% peso_criterio
  }
  # Does the same as the code above, but builds a table with criteria and subcriteria
  # weights interleaved; it also computes the CRs and includes the total weight of each
  # criterion in column 1 and the global weight of the alternatives in row 1
  CR_saaty = lapply(base, CR)
  aux = 2
  peso_criterio = matrix(nrow = n, ncol = m)
  peso_subcriterio = matrix(nrow = sum(map), ncol = m)
  tabela = matrix(nrow = n + sum(map) + 1, ncol = m + 2)
  tabela[1,] = c(1, peso_alternativas, CR(base[[1]]))
  l_tabela = 2
  # build the weight matrices of criteria and subcriteria separately
  auxlinha = 1  # controls the row of the peso_subcriterio matrix
  for(k in seq_along(map)){
    if (map[k] == 0) {
      peso_criterio[k,] = preferencias[[aux]]
      aux = aux + 1      # controls the position in the priority vector list
      auxlinha = k + 1   # controls the number of rows in the final table
      tabela[l_tabela,] = c(preferencias[[1]][k], peso_criterio[k,] * objetivo[[1]][k], CR_saaty[[l_tabela]])
      # changed the peso_criterio[k,] computation: multiplied by P(Ck) (objetivo[[1]][k])
      l_tabela = l_tabela + 1
    }
    if (map[k] > 0) {
      peso_parcial_critk = matrix(nrow = map[k], ncol = m)
      for(l in (1:map[k])){
        peso_parcial_critk[l,] = preferencias[[aux]][l] * preferencias[[(aux + l)]]
        tabela[l_tabela,] = c(preferencias[[aux]][l], peso_parcial_critk[l,], CR_saaty[[l_tabela + 1]])
        # changed NA to preferencias[[aux]][l] here
        l_tabela = l_tabela + 1
      }
      peso_subcriterio[auxlinha:(auxlinha + l - 1),] = peso_parcial_critk
      auxlinha = auxlinha + l
      aux = aux + l + 1
      peso_criterio[k,] = colSums(peso_parcial_critk) * objetivo[[1]][k]  # added * objetivo[[1]][k]
      tabela[l_tabela,] = c(preferencias[[1]][k], peso_criterio[k,], CR_saaty[[(l_tabela - map[k])]])
      l_tabela = l_tabela + 1
    }
  }
  # New computation of peso_criterio
  peso_criterio1 = objetivo[[1]] * peso_criterio
  # name the table rows containing the criteria and subcriteria weights;
  # reuse the seq created earlier to number the subcriteria
  linha = 1
  nome_tabela = NULL
  for(j in seq_along(map)){
    if(map[j] > 0){
      for(i in 1:map[j]){
        nome_tabela = append(nome_tabela, paste0("--SC", j, i))
      }
    }
    nome_tabela = append(nome_tabela, paste0("-C", j))
  }
  rownames(tabela) = c("Alternatives->", nome_tabela)
  colnames(tabela) = c("Weights", paste0("A", 1:m), "CR")
  tabela  # checked up to here
  tabela1 = tibble::tibble(criteria = rownames(tabela), tibble::as_tibble(tabela))
  return(dplyr::as_tibble(tabela1))
}
/scratch/gouwar.j/cran-all/cranData/AHPWR/R/ahp_s.R
#'@title Calculates the eigenvector of a matrix
#'@name autoVetor
#'@author Lyncoln Oliveira
#'@description Calculates the eigenvector of a matrix
#'@param matriz a paired matrix
#'
#'@return Returns a normalized eigenvector
#'
#'@examples
#'m=diag(16)+2-2*diag(16)
#'m
#'autoVetor(m)
#'
#'@export
#'
autoVetor = function(matriz){
  # Find the eigenvector associated with the largest eigenvalue
  decomposicao = eigen(matriz)
  autoValores = Re(decomposicao$values)
  autoVetores = Re(decomposicao$vectors)
  autoValorMax = which.max(autoValores)
  autoVetorAssociado = autoVetores[, autoValorMax]
  # Normalize so the entries sum to 1
  autoVetorNormalizado = autoVetorAssociado / sum(autoVetorAssociado)
  return(autoVetorNormalizado)
}
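# Illustrative check (weights invented for the example): for a nearly
# consistent matrix, the exact eigenvector priorities returned by autoVetor()
# are close to the row means of the column-normalized matrix computed by
# normaliza().
# m = matrix_ahp(paste0("C", 1:3), c(2, 4, 8))
# autoVetor(m)             # exact eigenvector method
# normaliza(list(m))[[1]]  # normalization approximation; close when CR(m) is small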
/scratch/gouwar.j/cran-all/cranData/AHPWR/R/autoVetor.R
#'@title Calculates the priority vector of a paired matrix
#'@name calcula_prioridades
#'@author Lyncoln Oliveira
#'@description Calculates the priority vector of each paired matrix in a list
#'@param lista a list of paired matrices
#'
#'@return Returns a list containing the priority vector of each matrix in the list
#'
#'@export
calcula_prioridades = function(lista){
  prioridades = lapply(lista, function(x) autoVetor(x))
  return(prioridades)
}
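# Usage sketch (judgments invented for the example): priorities for two
# judgment matrices at once.
# m1 = matrix_ahp(paste0("C", 1:3), c(5, 2, 7))
# m2 = matrix_ahp(paste0("A", 1:3), c(4.4, 5.2, 3))
# calcula_prioridades(list(m1, m2))  # list of two normalized priority vectors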
/scratch/gouwar.j/cran-all/cranData/AHPWR/R/calcula_prioridades.R
#'@title creates a hierarchical structure
#'@name flow_chart
#'@author Luciane Ferreira Alcoforado
#
#'@description Function to build the diagram of hierarchies
#'
#'@param names is a vector with names for goal, criteria and choices, in this exact sequence
#'@param c is an integer number of criteria, c >= 2
#'@param a is an integer number of choices, a >= 2
#'@return Returns the diagram of hierarchies
#'@examples
#'p=flow_chart(names=NULL, a=2, c=2)
#'p
#'
#'p=flow_chart(names=NULL, a=2, c=3)
#'p+ggplot2::theme_void()
#'
#'flow_chart(names=c("G", "cost", "time", "hour", "home", "beach"),c=3, a=2)
#'
#'@importFrom igraph graph_from_data_frame
#'@importFrom igraph layout_as_tree
#'@importFrom igraph vertex_attr
#'@import ggplot2
#'@import dplyr
#'@importFrom tidyr pivot_longer
#'@importFrom tibble as_tibble
#'@export
#'
flow_chart = function(names, c, a){
  #require(ggplot2)
  #require(igraph)
  require(tibble)
  require(tidyr)
  require(dplyr)
  if (any(c < 2)) stop("need positive value c >= 2")
  if (any(a < 2)) stop("need positive value a >= 2")
  if(is.null(names)) names = c("Goal", paste0("c", 1:c), paste0("a", 1:a))
  dt = data.frame(from = c(rep(names[1], c), rep(names[2:(c+1)], rep(a, c))),
                  to = c(names[2:(c+1)], rep(names[(c+2):(c+a+1)], c)))
  g = igraph::graph_from_data_frame(dt, directed = TRUE)
  coords = igraph::layout_as_tree(g)
  colnames(coords) = c("x", "y")
  coords[(c+2):(c+a+1), 1] = coords[(c+2):(c+a+1), 1] + 1
  step = igraph::vertex_attr(g, "name")
  tp = factor(c("Goal", rep("criteria", c), rep("choices", a)))
  output_df = tibble::as_tibble(coords)
  x = output_df$x  # not strictly needed in practice; added to work around a CRAN check note
  y = output_df$y  # not strictly needed in practice; added to work around a CRAN check note
  output_df = output_df %>%
    dplyr::mutate(
      #label = gsub("\\d+$", "", step),
      step = step,
      label = step,
      x = x * 1,
      type = tp)
  plot_nodes = output_df %>%
    dplyr::mutate(xmin = x - 0.25,
                  xmax = x + 0.25,
                  ymin = y - 0.25,
                  ymax = y + 0.25)
  plot_edges = dt %>%
    dplyr::mutate(id = dplyr::row_number()) %>%
    tidyr::pivot_longer(cols = c("from", "to"),
                        names_to = "s_e",
                        values_to = "step") %>%
    dplyr::left_join(plot_nodes, by = "step") %>%
    dplyr::select(-c(label, type, y, xmin, xmax)) %>%
    dplyr::mutate(y = ifelse(s_e == "from", ymin, ymax)) %>%
    dplyr::select(-c(ymin, ymax))
  p = ggplot2::ggplot() +
    ggplot2::coord_fixed() +
    ggplot2::geom_rect(data = plot_nodes,
                       mapping = ggplot2::aes(xmin = xmin, ymin = ymin,
                                              xmax = xmax, ymax = ymax,
                                              fill = type, colour = type),
                       alpha = 0.5)
  p = p + ggplot2::geom_text(data = plot_nodes,
                             mapping = ggplot2::aes(x = x, y = y, label = label),
                             #family = "Times New Roman",
                             color = "#685c50")
  p = p + ggplot2::geom_path(data = plot_edges,
                             mapping = ggplot2::aes(x = x, y = y, group = id),
                             colour = "#685c50",
                             arrow = ggplot2::arrow(length = ggplot2::unit(0.2, "cm"), type = "closed")) +
    ggplot2::labs(title = "Hierarchical Tree of Decision",
                  caption = "R-package AHPWR, 2022") +
    ggplot2::theme(axis.text.x = ggplot2::element_blank(),
                   axis.ticks.x = ggplot2::element_blank(),
                   axis.text.y = ggplot2::element_blank(),
                   axis.ticks.y = ggplot2::element_blank())
  return(p)
}
/scratch/gouwar.j/cran-all/cranData/AHPWR/R/flow_chart.R
#'@title Format an AHP table created by the ahp_geral() function
#'@name formata_tabela
#'
#'@author Lyncoln Oliveira
#'
#'@description Format an AHP table created by the ahp_geral() function
#'
#'@param tabela AHP table created by the ahp_geral() function
#'@param cores Color pattern used to format the table. If "PADRAO", uses the default green/blue palette; if "GRAY", a gray palette; if "WHITE", returns the table without colors
#'
#'@return Returns a table formatted with background colors responsive to the priority amounts of the elements
#'
#'
#'@import formattable
#'
#'@export
#'
formata_tabela = function(tabela, cores = "PADRAO"){
  #require(formattable)
  if(cores[1] == "PADRAO"){
    # Colors chosen using the triad color-harmony rule
    limiteInferiorCriterios = "#DeF7E9"
    limiteSuperiorCriterios = "#71CA97"
    limiteInferiorAlternativas = "#B6D4FF"
    limiteSuperiorAlternativas = "#0060D3"
    limiteInferiorCR = "#ff7f7f"
    limiteSuperiorCR = "#B0FFD5"
    cor_letra = "black"
  }
  if(cores[1] == "GRAY"){
    limiteInferiorCriterios = "#9e9e9e"
    limiteSuperiorCriterios = "#4f4f4f"
    limiteInferiorAlternativas = "#9e9e9e"
    limiteSuperiorAlternativas = "#4f4f4f"
    limiteInferiorCR = "#4f4f4f"
    limiteSuperiorCR = "#9e9e9e"
    cor_letra = "white"
  }
  if(cores[1] == "WHITE"){
    limiteInferiorCriterios = "#ffffff"
    limiteSuperiorCriterios = "#ffffff"
    limiteInferiorAlternativas = "#ffffff"
    limiteSuperiorAlternativas = "#ffffff"
    limiteInferiorCR = "#ffffff"
    limiteSuperiorCR = "#ffffff"
    cor_letra = "black"
  }
  numero_linhas = dim(tabela)[1]
  numero_colunas = dim(tabela)[2]
  tabela_porcento = transforma_tabela(tabela)
  maior_alternativa = round(max(100 * as.numeric(unlist(lapply(tabela[1, 3:(numero_colunas - 1)], function(x) gsub("%", "", x))))), 2)
  formato = function(cor1, cor2){
    formattable::formatter(.tag = "span",
      style = function(x) formattable::style(
        "background-color" = formattable::csscolor(formattable::gradient(as.numeric(unlist(lapply(x, function(x) gsub("%", "", x)))), cor1, cor2)),
        "border-radius" = "4px",
        "color" = cor_letra,
        display = "block"))
  }
  formata_maior_alternativa = formattable::formatter("span",
    style = x ~ formattable::style(
      "font-weight" = ifelse(as.numeric(unlist(lapply(x, function(x) gsub("%", "", x)))) == maior_alternativa, "bold", NA),
      "font-size" = ifelse(as.numeric(unlist(lapply(x, function(x) gsub("%", "", x)))) == maior_alternativa, "130%", NA)))
  formato_CR = formattable::formatter(.tag = "span",
    style = function(x) formattable::style(
      "background-color" = ifelse(as.numeric(unlist(lapply(x, function(x) gsub("%", "", x)))) >= 10, limiteInferiorCR, limiteSuperiorCR),
      "border-radius" = "4px",
      "color" = cor_letra,
      display = "block"))
  tabela_formatada = formattable::formattable(tabela_porcento,
    align = c("l", rep("c", numero_colunas - 1)),
    list(
      "Criteria" = formattable::formatter("span", style = ~ formattable::style(color = "grey", font.weight = "bold")),
      formattable::area(row = 2:(numero_linhas), col = 2) ~ formato(limiteInferiorCriterios, limiteSuperiorCriterios),
      formattable::area(row = 2:(numero_linhas), col = 3:(numero_colunas - 1)) ~ formato(limiteInferiorAlternativas, limiteSuperiorAlternativas),
      formattable::area(col = numero_colunas) ~ formato_CR,
      formattable::area(row = 1, col = (3:numero_colunas - 1)) ~ formata_maior_alternativa
    )
  )
  return(tabela_formatada)
}
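# Usage sketch (assumes `base` built as in the ahp_geral() examples):
# formata_tabela() takes the tibble returned by ahp_geral() and is meant to be
# rendered in an HTML context (R Markdown, RStudio viewer).
# tab = ahp_geral(base)
# formata_tabela(tab)          # default green/blue palette
# formata_tabela(tab, "GRAY")  # gray palette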
/scratch/gouwar.j/cran-all/cranData/AHPWR/R/formata_tabela.R
#'@title Format an AHP table created by the ahp_geral() function
#'@name formata_tabela2
#'
#'@author Lyncoln Oliveira
#'
#'@description Format an AHP table created by the ahp_geral() function
#'
#'@param tabela AHP table created by the ahp_geral() function
#'@param cores Color pattern used to format the table. If "PADRAO", uses the default green/blue palette; if "GRAY", a gray palette; if "WHITE", returns the table without colors
#'
#'@return Returns a table formatted with background colors responsive to the priority amounts of the elements
#'
#'@importFrom formattable formatter style csscolor gradient area
#'
#'@export
#'
formata_tabela2 = function(tabela, cores = "PADRAO"){
  #require(formattable)
  if(cores[1] == "PADRAO"){
    # Colors chosen using the triad color-harmony rule
    limiteInferiorCriterios = "#DeF7E9"
    limiteSuperiorCriterios = "#71CA97"
    limiteInferiorAlternativas = "#B6D4FF"
    limiteSuperiorAlternativas = "#0060D3"
    limiteInferiorCR = "#ff7f7f"
    limiteSuperiorCR = "#B0FFD5"
    cor_letra = "black"
  }
  if(cores[1] == "GRAY"){
    limiteInferiorCriterios = "#9e9e9e"
    limiteSuperiorCriterios = "#4f4f4f"
    limiteInferiorAlternativas = "#9e9e9e"
    limiteSuperiorAlternativas = "#4f4f4f"
    limiteInferiorCR = "#4f4f4f"
    limiteSuperiorCR = "#9e9e9e"
    cor_letra = "white"
  }
  if(cores[1] == "WHITE"){
    limiteInferiorCriterios = "#ffffff"
    limiteSuperiorCriterios = "#ffffff"
    limiteInferiorAlternativas = "#ffffff"
    limiteSuperiorAlternativas = "#ffffff"
    limiteInferiorCR = "#ffffff"
    limiteSuperiorCR = "#ffffff"
    cor_letra = "black"
  }
  numero_linhas = dim(tabela)[1]
  numero_colunas = dim(tabela)[2]
  tabela_porcento = transforma_tabela(tabela)
  maior_alternativa = round(max(100 * as.numeric(unlist(lapply(tabela[1, 3:(numero_colunas - 1)], function(x) gsub("%", "", x))))), 2)
  formato = function(cor1, cor2){
    formatter(.tag = "span",
      style = x ~ style(
        "background-color" = csscolor(gradient(as.numeric(unlist(lapply(x, function(x) gsub("%", "", x)))), cor1, cor2)),
        "border-radius" = "4px",
        "color" = cor_letra,
        display = "block"))
  }
  formata_maior_alternativa = formatter("span",
    style = x ~ style(
      "font-weight" = ifelse(as.numeric(unlist(lapply(x, function(x) gsub("%", "", x)))) == maior_alternativa, "bold", NA),
      "font-size" = ifelse(as.numeric(unlist(lapply(x, function(x) gsub("%", "", x)))) == maior_alternativa, "130%", NA)))
  formato_CR = formatter(.tag = "span",
    style = x ~ style(
      "background-color" = ifelse(as.numeric(unlist(lapply(x, function(x) gsub("%", "", x)))) >= 10, limiteInferiorCR, limiteSuperiorCR),
      "border-radius" = "4px",
      "color" = cor_letra,
      display = "block"))
  # qualified call: only specific helpers are imported via @importFrom, so
  # formattable() itself must be addressed with the package prefix
  tabela_formatada = formattable::formattable(tabela_porcento,
    align = c("l", rep("c", numero_colunas - 1)),
    list(
      "Criteria" = formatter("span", style = ~ style(color = "grey", font.weight = "bold")),
      area(row = 2:(numero_linhas), col = 2) ~ formato(limiteInferiorCriterios, limiteSuperiorCriterios),
      area(row = 2:(numero_linhas), col = 3:(numero_colunas - 1)) ~ formato(limiteInferiorAlternativas, limiteSuperiorAlternativas),
      area(col = numero_colunas) ~ formato_CR,
      area(row = 1, col = (3:numero_colunas - 1)) ~ formata_maior_alternativa
    )
  )
  return(tabela_formatada)
}
/scratch/gouwar.j/cran-all/cranData/AHPWR/R/formata_tabela2.R
utils::globalVariables(c("Alternativas", "Pesos", "Ranque", "label", "s_e", "step", "type", "x" ,"xmax" ,"xmin", "y" ,"ymax", "ymin"))
/scratch/gouwar.j/cran-all/cranData/AHPWR/R/globals.R
#'@title Read an excel file containing the paired matrices and turn all its worksheets into a list of matrices in R
#'@name ler
#'@author Lyncoln Oliveira
#'
#'@description Function to read an excel file containing the paired matrices and turn all its worksheets into a list of matrices in R
#'
#'
#'@param caminho Path to an excel file that contains the worksheets
#'
#'@return Returns a list containing the paired matrices from the excel file
#'
#'@examples
#'caminho <- system.file("tests", "test_import.xlsx", package = "xlsx")
#'lista = ler(caminho)
#'
#'@import readxl
#'
#'@export
ler = function(caminho){
  #require(readxl)
  planilhas = readxl::excel_sheets(caminho)
  matrizes = suppressMessages(lapply(planilhas, function(x) readxl::read_excel(path = caminho, col_names = FALSE, sheet = x)))
  names(matrizes) = planilhas
  return(matrizes)
}
/scratch/gouwar.j/cran-all/cranData/AHPWR/R/ler.R
#'@title creates a matrix containing the holistic judgments
#'@name matrix_ahp
#'@author Luciane Ferreira Alcoforado
#
#'@description Function to build the judgment matrix
#'
#'@param x is a vector of names of criteria or choices
#'@param y is a vector of weights on Saaty's scale, in [1,9]
#'@return Returns a judgment matrix.
#'@examples
#'x=c("c1", "c2", "c3", "c4")
#'y=c(3, 9, 2, 8)
#'matrix_ahp(x,y)
#'
#'x=c("a1", "a2", "a3", "a4", "a5")
#'y=c(1, 9, 1.5, 8, 6)
#'matrix_ahp(x,y)
#'@export
matrix_ahp = function(x, y){
  n = length(x)
  m = diag(n)
  colnames(m) = x
  rownames(m) = x
  for(i in 1:(n-1)){
    for(j in (i+1):n){
      m[i,j] = ifelse(y[i] < y[j], 1/(y[j] - y[i] + 1), (y[i] - y[j] + 1))
      m[j,i] = 1/m[i,j]
    }
  }
  return(m)
}
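# Quick sanity check (illustrative): entries follow the "difference of
# holistic weights plus one" rule and the matrix is reciprocal.
# m = matrix_ahp(c("c1", "c2"), c(3, 7))
# m["c1", "c2"]                        # 1 / (7 - 3 + 1) = 0.2
# m["c2", "c1"] == 1 / m["c1", "c2"]   # TRUE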
/scratch/gouwar.j/cran-all/cranData/AHPWR/R/matrix_ahp.R
#'@title Creates paired matrices and can test Saaty's consistency ratio
#'@name matriz_julgamento
#'@author Lyncoln Oliveira
#
#'@description Function that creates paired matrices and can test Saaty's consistency ratio
#'
#'@param n_comp Number of elements to be evaluated
#'@param CR If TRUE, also returns Saaty's consistency ratio; if FALSE, returns only the matrices
#'@param n_matrix Number of matrices to be created
#'
#'@return Returns a list with 2 positions. The first position contains the paired matrices and the second their consistency ratios
#'
#'@export
matriz_julgamento = function(n_comp, CR = TRUE, n_matrix = 1){
  matrizes = list()
  erros = c()
  conjunto = list()
  for(k in 1:n_matrix){
    if(n_matrix != 1) print(paste0("fill the matrix ", as.character(k)))
    matriz = diag(1, n_comp, n_comp)
    for (i in 1:(n_comp-1)){
      for(j in (i+1):(n_comp)){
        valor = eval(parse(text = (readline(paste0("How important is criterion ", as.character(i), " in relation to criterion ", as.character(j), ": ")))))
        matriz[i,j] = valor
        matriz[j,i] = 1/valor
      }
    }
    matrizes[[k]] = matriz
    if(CR == TRUE) erros[k] = CR(matriz)
  }
  conjunto[[1]] = matrizes
  names(conjunto) = "Matrix"
  if(CR == TRUE) {
    conjunto[[2]] = erros
    names(conjunto) = c("Matrix", "CR")
  }
  return(conjunto)
}
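# Interactive usage sketch (the console transcript below is illustrative):
# the function prompts for each upper-triangle judgment via readline(), so it
# only makes sense in an interactive session.
# res = matriz_julgamento(n_comp = 3, CR = TRUE, n_matrix = 1)
# How important is criterion 1 in relation to criterion 2: 3
# How important is criterion 1 in relation to criterion 3: 5
# How important is criterion 2 in relation to criterion 3: 1/2
# res$Matrix[[1]]  # reconstructed pairwise matrix
# res$CR           # Saaty's consistency ratio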
/scratch/gouwar.j/cran-all/cranData/AHPWR/R/matriz_julgamento.R
#'@title generates vectors of weights for criteria and alternatives
#'@name normaliza
#'@author Lyncoln Oliveira
#'
#'@description Function that generates vectors of weights for criteria and alternatives
#'
#'@param lista is a list of judgment matrices
#'@return Returns an auxiliary list
#'@examples
#'lista = list(M1=diag(3), M2=diag(3)+4-4*diag(3))
#'normaliza(lista)
#'
#'@export
normaliza = function(lista){
  #require(magrittr)
  lista_aux = list()
  for(i in 1:length(lista)){
    aux = lista[[i]] %>% apply(2, sum)                        # column sums
    lista[[i]] = t(apply(lista[[i]], 1, function(x) x/aux))   # column-normalize the matrix
    lista_aux[[i]] = apply(lista[[i]], 1, sum)/length(lista[[i]][1,])  # row means = approximate priorities
  }
  names(lista_aux) = names(lista)
  return(lista_aux)
}
/scratch/gouwar.j/cran-all/cranData/AHPWR/R/normaliza.R
#'@title Calculates the ranking of alternatives
#'@name ranque
#'@author Lyncoln Oliveira
#'@description Calculates the ranking of alternatives from a table built by ahp_s or ahp_geral
#'@param tabela table built by ahp_s or ahp_geral
#'@return Table containing the ranking of alternatives
#'
#'
#'@examples
#'x=paste0(letters[3],1:5)
#'y=c(5,2,7,3,2)
#'m1=matrix_ahp(x,y)
#'x=paste0(letters[1],1:3)
#'y=c(4.4,5.2,3)
#'m2=matrix_ahp(x,y)
#'y=c(2,4,3)
#'m3=matrix_ahp(x,y)
#'y=c(4.9,5,3.3)
#'m4=matrix_ahp(x,y)
#'y=c(4.4,4.2,4.3)
#'m5=matrix_ahp(x,y)
#'y=c(5.4,5.2,5.7)
#'m6=matrix_ahp(x,y)
#'base=list(m1, m2, m3, m4, m5, m6)
#'mapeamento = rep(0,5)
#'nomes_alternativas = paste0(letters[1],1:3)
#'tabela = ahp(base,mapeamento, nomes_alternativas)
#'ranque(tabela)
#'
#'#with subcriteria: 3 criteria and 2 alternatives
#'mapeamento = c(2,0,0) #2 subcriteria under criterion 1, none under the others
#'x=paste0(letters[3],1:3) #3 criteria
#'y=c(5,2,7)
#'m1=matrix_ahp(x,y) #compares the criteria
#'x=paste0(letters[4],1:2)
#'y=c(4,6)
#'m2=matrix_ahp(x,y) #compares the 2 subcriteria of criterion 1
#'x=paste0(letters[1],1:2)
#'y=c(2,4)
#'m3=matrix_ahp(x,y) #alternatives for subcriterion 1
#'y=c(4.9,5)
#'m4=matrix_ahp(x,y) #alternatives for subcriterion 2
#'y=c(4.4,4.2)
#'m5=matrix_ahp(x,y) #alternatives for criterion 2
#'y=c(5.4,5.2)
#'m6=matrix_ahp(x,y) #alternatives for criterion 3
#'base=list(m1, m2, m3, m4, m5, m6)
#'
#'nomes_alternativas = paste0(letters[1],1:2)
#'tabela = ahp(base,mapeamento, nomes_alternativas)
#'ranque(tabela)
#'
#'@import dplyr
#'@import tidyr
#'
#'@export
ranque = function(tabela){
  num_colunas = length(tabela[1,])
  num_linhas = length(tabela[,1])
  alternativas = tabela[1, 3:(num_colunas - 1)]
  return(dplyr::select(
    dplyr::mutate(
      dplyr::arrange(
        tidyr::gather(alternativas, Alternativas, Pesos),
        desc(Pesos)),
      Ranque = c(1:length(alternativas[1,]))),
    Ranque, dplyr::everything()))
}
/scratch/gouwar.j/cran-all/cranData/AHPWR/R/ranque.R
#'@title organizes a table with comparison matrix data
#'@name tabela_holistica
#'@author Luciane Ferreira Alcoforado & Orlando Longo
#'
#'@description Function to organize a table with comparison matrix data
#'
#'@param pesos vector with holistic weights for comparison, or a comparison matrix
#'
#'@return Returns a table with the assigned holistic weights, the comparison matrix, the priority vector and the consistency ratio
#'
#'@examples
#'pesos = c(3, 7, 9, 2)
#'names(pesos) = paste0("C",1:4)
#'tabela_holistica(pesos)
#'
#'m = matrix_ahp(y=c(3,4,3,2.5), x=paste0("A",1:4))
#'tabela_holistica(pesos=m)
#'
#'@import dplyr
#'
#'@export
tabela_holistica = function(pesos){
  #require(dplyr)
  if(is.vector(pesos)){
    # assign default names (the original computed them but never assigned)
    if (is.null(names(pesos))) names(pesos) = paste0("E", 1:length(pesos))
    m2 = matrix_ahp(x = names(pesos), y = pesos)
    l1 = pesos
    l2 = autoVetor(m2); names(l2) = names(pesos)
    c2 = c(rep("_", length(pesos) + 1), round(CR(m2), 2))
    c1 = c("weights", names(pesos), "priority")
    tabela = dplyr::bind_rows(pesos, as.data.frame(m2))
    tabela = dplyr::bind_rows(tabela, l2)
    tabela = dplyr::bind_cols(tabela, CR = c2)
    tabela = dplyr::bind_cols(c1, tabela)
  }
  if(is.matrix(pesos)){
    # use ncol(): length() of a matrix counts every entry, not the columns
    if (is.null(colnames(pesos))) colnames(pesos) = paste0("E", 1:ncol(pesos))
    l2 = autoVetor(pesos); names(l2) = colnames(pesos)
    c2 = c(rep("_", length(l2)), round(CR(pesos), 2))
    c1 = c(colnames(pesos), "priority")
    tabela = dplyr::bind_rows(as.data.frame(pesos), l2)
    tabela = dplyr::bind_cols(tabela, CR = c2)
    tabela = dplyr::bind_cols(c1, tabela)
    row.names(tabela) <- NULL
  }
  return(tabela)
}
/scratch/gouwar.j/cran-all/cranData/AHPWR/R/tabela_holistica.R
#'@title Transforms a table of numbers into percentages with 2 decimal places
#'@name transforma_tabela
#'@author Lyncoln Oliveira
#'
#'@description Function to transform a table of numbers into percentages with 2 decimal places
#'
#'@param tabela table to transform
#'
#'@return Returns the transformed table, with percentages
#'
#'@examples
#'tabela=data.frame(x=c(0.5, 0.25), y=c(0.55, 0.93))
#'transforma_tabela(tabela)
#'
#'@import dplyr
#'@export
transforma_tabela = function(tabela){
  #require(dplyr)
  numero_linhas = dim(tabela)[1]
  numero_colunas = dim(tabela)[2]
  tabela_porcento = dplyr::mutate_if(tabela, is.numeric, function(x) paste0(round(100*x, 2), "%"))
  #tabela_porcento = dplyr::slice(tabela_porcento, numero_linhas, 1:(numero_linhas - 1))
  #nomes_criterios = c(tabela_porcento$Criterios[1], unlist(lapply(tabela_porcento$Criterios[2:numero_linhas], function(x) paste0("- ", x))))
  #tabela_porcento = dplyr::mutate(tabela_porcento)
  return(tabela_porcento)
}
/scratch/gouwar.j/cran-all/cranData/AHPWR/R/transforma_tabela.R
#' Pipe operator #' #' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details. #' #' @name %>% #' @rdname pipe #' @keywords internal #' @export #' @importFrom magrittr %>% #' @usage lhs \%>\% rhs #' @param lhs A value or the magrittr placeholder. #' @param rhs A function call using the magrittr semantics. #' @return The result of calling `rhs(lhs)`. NULL
/scratch/gouwar.j/cran-all/cranData/AHPWR/R/utils-pipe.R
#'@title build a file with judgment matrices
#'@name xlsx_ahp
#'@author Luciane Ferreira Alcoforado
#
#'@description Function to build a file with judgment matrices
#'
#'@param m is a pairwise comparison matrix
#'@param file is the path to the output file.
#'@param sheet is a character string with the sheet name.
#'@param append is a logical value indicating if m should be appended to an existing file. If TRUE, the file is read from disk.
#'@return Writes the matrix to an xlsx document
#'
#'#m=diag(10)
#'#file1 = xlsx_ahp(m, file = "Example_1.xlsx", sheet = "M1", append = FALSE)
#'#file2 = xlsx_ahp(m, file = "Example_1.xlsx", sheet = "M2", append = TRUE)
#'
#'#see file Example_1.XLSX in the working directory
#'
#'@import xlsx
#'
#'
#'@export
xlsx_ahp = function(m, file, sheet, append){
  #require(xlsx)
  if(append == T){
    xlsx::write.xlsx(x = m, file = file, sheetName = sheet, append = TRUE)
  } else {
    xlsx::write.xlsx(x = m, file = file, sheetName = sheet, append = FALSE)
  }
}
# update the Java kit: https://www.oracle.com/java/technologies/downloads/#jdk19-windows
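# Round-trip sketch (illustrative; the xlsx package needs a working Java
# setup): write two matrices into one workbook, then read them back with ler().
# f = tempfile(fileext = ".xlsx")
# xlsx_ahp(diag(3), file = f, sheet = "M1", append = FALSE)
# xlsx_ahp(diag(3) + 4 - 4 * diag(3), file = f, sheet = "M2", append = TRUE)
# ler(f)  # list with one data frame per sheet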
/scratch/gouwar.j/cran-all/cranData/AHPWR/R/xlsx_ahp.R
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

## ----setup--------------------------------------------------------------------
knitr::opts_chunk$set(warning = FALSE, message = FALSE)
library(AHPWR)
library(kableExtra)

## -----------------------------------------------------------------------------
# generic, c = 4 criteria and a = 3 alternatives
flow_chart(names = NULL, c = 4, a = 3)

## -----------------------------------------------------------------------------
# generic, c = 4 criteria and a = 3 alternatives
p = flow_chart(names = NULL, c = 4, a = 3)
p + ggplot2::labs(title = "A three level hierarchy", x = "", y = "")

## -----------------------------------------------------------------------------
# generic, c = 4 criteria and a = 3 alternatives
p = flow_chart(names = NULL, c = 4, a = 3)
p + ggplot2::labs(title = "A three level hierarchy", x = "", y = "") + ggplot2::theme_void()

## -----------------------------------------------------------------------------
# named example, c = 4 criteria and a = 3 alternatives
goal = "Satisfaction with House"
criterios = c("Size", "Age", "Yard", "Neighborhood")
alternatives = c("house A", "house B", "house C")
names = c(goal, criterios, alternatives)
p = flow_chart(names, c = 4, a = 3)
p + ggplot2::labs(title = "A three level hierarchy", x = "", y = "") + ggplot2::theme_void()

## ----echo=FALSE---------------------------------------------------------------
`Intensity of importance` = 1:9
Definition = c("Equal Importance", "Weak", "Moderate importance", "Moderate plus",
               "Strong importance", "Strong plus",
               "Very strong or demonstrated importance", "Very, very strong",
               "Extreme importance")
tab = data.frame(`Intensity of importance`, Definition)
knitr::kable(tab, caption = "Table 1: The fundamental scale")

## -----------------------------------------------------------------------------
x = c("life cycle", "maintenance cost", "environmental impacts", "construction cost") # criteria
y = c(2, 5, 2, 3) # weights
m1 = matrix_ahp(x, y)
m1

## -----------------------------------------------------------------------------
names(y) = x
table = tabela_holistica(pesos = y)
table
knitr::kable(table)

## -----------------------------------------------------------------------------
require(magrittr)
require(kableExtra)
knitr::kable(as.data.frame(table), align = 'c', digits = 2) %>%
  row_spec(1, italic = TRUE, background = 'gray') %>%
  row_spec(2:5, color = 'black', background = 'yellow') %>%
  row_spec(6, underline = TRUE, color = 'black', background = 'gray', bold = TRUE) %>%
  column_spec(6, background = 'gray')

## -----------------------------------------------------------------------------
x = c("bridge", "tunnel") # alternatives under the life cycle criterion
y = c(1, 3) # weights
m2 = matrix_ahp(x, y)
m2

## -----------------------------------------------------------------------------
names(y) = x
table = tabela_holistica(pesos = y)
table

## -----------------------------------------------------------------------------
x = c("bridge", "tunnel") # alternatives under the maintenance cost criterion
y = c(1, 4) # weights
m3 = matrix_ahp(x, y)
m3

## -----------------------------------------------------------------------------
names(y) = x
table = tabela_holistica(pesos = y)
table

## -----------------------------------------------------------------------------
x = c("bridge", "tunnel") # alternatives under the environmental impacts criterion
y = c(1, 2) # weights
m4 = matrix_ahp(x, y)
m4

## -----------------------------------------------------------------------------
names(y) = x
table = tabela_holistica(pesos = y)
table

## -----------------------------------------------------------------------------
x = c("bridge", "tunnel") # alternatives under the construction cost criterion
y = c(5, 3) # weights
m5 = matrix_ahp(x, y)
m5

## -----------------------------------------------------------------------------
names(y) = x
table = tabela_holistica(pesos = y)
table

## -----------------------------------------------------------------------------
# consistency index
CI(m1)
CI(m2)
CI(m3)
CI(m4)
CI(m5)

## -----------------------------------------------------------------------------
# consistency ratio
CR(m1)
CR(m2)
CR(m3)
CR(m4)
CR(m5)

## -----------------------------------------------------------------------------
lista = list(m1, m2, m3, m4, m5)
calcula_prioridades(lista)

## -----------------------------------------------------------------------------
lista
ahp_geral(lista)

## -----------------------------------------------------------------------------
x = paste0(letters[3], 1:5) # criteria names C1, C2, ..., C5
y = c(5, 2, 7, 3, 2) # judgments
m1 = matrix_ahp(x, y)
x = paste0(letters[1], 1:3) # alternative names A1, A2, A3
y = c(4.4, 5.2, 3)
m2 = matrix_ahp(x, y)
y = c(2, 4, 3)
m3 = matrix_ahp(x, y)
y = c(4.9, 5, 3.3)
m4 = matrix_ahp(x, y)
y = c(4.4, 4.2, 4.3)
m5 = matrix_ahp(x, y)
y = c(5.4, 5.2, 5.7)
m6 = matrix_ahp(x, y)
base = list(m1, m2, m3, m4, m5, m6)
base
calcula_prioridades(base) # returns only the priority vectors
lapply(base, tabela_holistica) # returns a table with the comparison matrix, the priority vector and the CR
ahp_geral(base)

## -----------------------------------------------------------------------------
table1 = ahp_geral(base)
transforma_tabela(table1)

## -----------------------------------------------------------------------------
formata_tabela(table1)
formata_tabela(table1, cores = "GRAY")
formata_tabela(table1, cores = "WHITE")

## -----------------------------------------------------------------------------
ranque(table1)

## -----------------------------------------------------------------------------
# two criteria, each with two subcriteria
map = c(2, 2)
# x with names and y with holistic judgments
x = paste0(letters[3], 1:2) # 2 criteria
y = c(5, 7)
m1 = matrix_ahp(x, y) # matrix comparing the two criteria
x = paste0("SC1", 1:2)
y = c(4, 6)
m2 = matrix_ahp(x, y) # matrix comparing the two subcriteria of criterion 1
x = paste0(letters[1], 1:3)
y = c(2, 4, 5)
m3 = matrix_ahp(x, y) # alternatives for subcriterion 1 - criterion 1
y = c(4.9, 5, 2)
m4 = matrix_ahp(x, y) # alternatives for subcriterion 2 - criterion 1
y = c(4.4, 8, 6)
x = paste0("SC2", 1:2)
m5 = matrix_ahp(x, y) # matrix comparing the two subcriteria of criterion 2
y = c(5.4, 5.2, 1)
x = paste0(letters[1], 1:3)
m6 = matrix_ahp(x, y) # alternatives for subcriterion 1 - criterion 2
y = c(9, 5.2, 3)
m7 = matrix_ahp(x, y) # alternatives for subcriterion 2 - criterion 2
base = list(m1, m2, m3, m4, m5, m6, m7)
base

## -----------------------------------------------------------------------------
# priority vectors and CR
# calcula_prioridades(base) # returns only the priority vectors
lapply(base, tabela_holistica) # returns a table with the comparison matrix, the priority vector and the CR
ahp_s(base, map)
tb = ahp_s(base, map)
transforma_tabela(tb)
formata_tabela(tb)

## -----------------------------------------------------------------------------
p1 = c(2, 4, 5, 1, 6, 3) # holistic weights comparing 6 criteria
p2 = c(5, 4, 6, 7)       # holistic weights comparing 4 alternatives under criterion 1
p3 = c(2, 8, 2, 7)       # ... criterion 2
p4 = c(5, 1, 4, 1)       # ... criterion 3
p5 = c(3.4, 4, 2, 3)     # ... criterion 4
p6 = c(6, 4, 2, 2.5)     # ... criterion 5
p7 = c(5, 3, 6, 1.8)     # ... criterion 6
x1 = paste0("C", 1:6)
x = paste0("A", 1:4)
m1 = matrix_ahp(x1, p1)
m2 = matrix_ahp(x, p2)
m3 = matrix_ahp(x, p3)
m4 = matrix_ahp(x, p4)
m5 = matrix_ahp(x, p5)
m6 = matrix_ahp(x, p6)
m7 = matrix_ahp(x, p7)
base = list(m1, m2, m3, m4, m5, m6, m7)
formata_tabela(ahp_geral(base))
formata_tabela(ahp_s(base, map = c(0, 0, 0, 0, 0, 0)))

## ----echo=FALSE---------------------------------------------------------------
# to check the package
#devtools::check(args = c("--as-cran"), check_dir = dirname(getwd()))
/scratch/gouwar.j/cran-all/cranData/AHPWR/inst/doc/Intro_to_AHP.R