#' @title Generation of Artificial Data
#'
#' @description The function generates a set of artificial data, including covariates generated from a uniform
#' distribution on the interval \code{[-0.5, 0.5]}, together with survival times and censoring statuses that are
#' subject to measurement error and misclassification. In this function, users can specify the degree of measurement
#' error that links the observed survival time with the true survival time, and the observed
#' censoring status with the true censoring status. Moreover, the accelerated functional failure time model considered in
#' this function is given by \code{T=f(X1)+f(X2)+f(X3)+f(X4)+error}, where \code{T} is the log failure time and \code{f(X1)=4*x1^2+x1},
#' \code{f(X2)=sin(6*x2)}, \code{f(X3)=cos(6*x3)-1} and \code{f(X4)=4*x4^3+x4^2}.
#'
#' @param n Sample size.
#' @param p The number of covariates.
#' @param pi_01 Misclassification probability P(Observed Censoring Status = 0 | Actual Censoring Status = 1).
#' @param pi_10 Misclassification probability P(Observed Censoring Status = 1 | Actual Censoring Status = 0).
#' @param gamma0 A scalar that links the observed survival time and the true survival time in
#' the classical additive measurement error model \code{y*=y+gamma0+gamma1*X+v}, where \code{y*} is the observed survival time, \code{y} is the true survival time, \code{X} is the covariates and \code{v} is the noise term.
#' @param gamma1 A \code{p}-dimensional vector of parameters in the
#' additive measurement error model \code{y*=y+gamma0+gamma1*X+v}, where \code{y*}
#' is the observed survival time, \code{y} is the true survival time, \code{X} is the covariates and \code{v} is the
#' noise term.
#' @param e_var The variance of the noise term \code{v} in the additive measurement
#' error model \code{y*=y+gamma0+gamma1*X+v}, where \code{v} is assumed to follow a normal
#' distribution.
#'
#' @return generated_data A \code{c(n,p+2)}-dimensional data frame. The first column is the observed survival time, the
#' second column is the observed censoring status, and the other columns are the covariates.
#'
#' @examples
#' ## Set the relationship between the observed survival time
#' ## and the true survival time to y* = y+1+X1+v, where the noise variance is
#' ## 0.75, with n=500, p=50 and misclassification probability 0.9.
#'
#' a <- matrix(0, ncol = 50, nrow = 1); a[1,1] <- 1
#' data <- data_gen(n=500, p=50, pi_01=0.9, pi_10=0.9, gamma0=1,
#'                  gamma1=a, e_var=0.75)
#' @export
#' @importFrom stats "rnorm" "runif"

data_gen <- function(n, p, pi_01, pi_10, gamma0, gamma1, e_var){
  ## component functions of the accelerated functional failure time model
  f1 <- function(x1){
    y <- 4*x1^2 + x1
    return(y)
  }
  f2 <- function(x2){
    y <- sin(6*x2)
    return(y)
  }
  f3 <- function(x3){
    y <- cos(6*x3) - 1
    return(y)
  }
  f4 <- function(x4){
    y <- 4*x4^3 + x4^2
    return(y)
  }

  ## generate covariates and true log failure times
  t <- c()
  e <- c()
  d <- c()
  covariates <- c()
  dim <- p
  for (i in c(1:n)){
    p <- runif(dim, -0.5, 0.5)  # note: reuses the name p for one subject's covariates
    error <- rnorm(1, 0, 1)
    a <- f1(p[1]) + f2(p[2]) + f3(p[3]) + f4(p[4]) + error
    t[i] <- a
    e[i] <- error
    d <- t(data.frame(p))
    covariates <- rbind(covariates, d)
  }

  ## true censoring status from a logistic model with random coefficients
  censoring_probability_n <- c()
  coefficients <- matrix(runif(dim, -5, 5), nrow = 1)
  for (i in c(1:dim(covariates)[1])){
    variables <- t(matrix(covariates[i,], nrow = 1))
    df4 <- coefficients %*% variables
    censoring_probability <- (exp(df4))/(1 + exp(df4))
    censoring_probability_n <- c(censoring_probability_n, censoring_probability)
  }
  censoring_indicator <- (censoring_probability_n >= 0.5)*1

  y <- c()
  for (i in c(1:length(censoring_indicator))){
    if (censoring_indicator[i] == 1){
      y <- c(y, t[i])
    } else {
      y <- c(y, t[i] - exp(0.003))
    }
  }

  ## contaminate the survival time: y* = y + gamma0 + gamma1*X + v
  df_for_y_with_measurement_error <- c()
  v <- data.frame(rnorm(length(censoring_indicator), 0, e_var))
  constant <- data.frame(rep(gamma0, length(censoring_indicator)))
  b_cov <- c()
  for (i in 1:dim(covariates)[1]){
    b_cov[i] <- gamma1 %*% covariates[i,]
  }
  df_for_y_with_measurement_error <- cbind(y, constant, v, b_cov)
  colnames(df_for_y_with_measurement_error)[2:3] <- c('constant','v')
  y_with_measurement_error <- data.frame(apply(df_for_y_with_measurement_error, 1, sum))

  ## misclassify the censoring indicator with probabilities pi_01 and pi_10
  misclassification_proportion <- matrix(c(1-pi_01, pi_01, pi_10, 1-pi_10), ncol = 2)
  censoring_probability_indicator_1 <- censoring_probability_n
  censoring_probability_indicator_0 <- 1 - censoring_probability_n
  df5 <- cbind(censoring_probability_indicator_1, censoring_probability_indicator_0)
  censoring_indicator_with_measurement_error <- NULL
  for (i in c(1:length(censoring_indicator))){
    df6 <- misclassification_proportion %*% df5[i,]
    if (df6[1,] >= 0.5){
      censoring_indicator_with_measurement_error <- c(censoring_indicator_with_measurement_error, 1)
    } else {
      censoring_indicator_with_measurement_error <- c(censoring_indicator_with_measurement_error, 0)
    }
  }

  generated_data <- cbind(y_with_measurement_error, censoring_indicator_with_measurement_error, covariates)
  colnames(generated_data)[1:2] <- c('failure time', 'censoring indicator')
  return(generated_data)
}
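## ---------------------------------------------------------------------------
## Illustrative sketch (not part of the package): a minimal, self-contained
## simulation of the additive measurement error model y* = y + gamma0 +
## gamma1*X + v documented above. All object names below (true_y, obs_y, ...)
## are hypothetical and chosen for illustration only.
set.seed(1)
n <- 200; p <- 5
X <- matrix(runif(n * p, -0.5, 0.5), nrow = n)   # covariates as in data_gen
true_y <- rnorm(n)                               # stand-in for true log failure times
gamma0 <- 1
gamma1 <- c(1, rep(0, p - 1))                    # y* depends on the first covariate only
v <- rnorm(n, 0, sqrt(0.75))                     # normal noise term with variance 0.75
obs_y <- true_y + gamma0 + X %*% gamma1 + v      # error-prone observed survival time
## the systematic bias of the error model is gamma0 plus the covariate effect:
mean(obs_y - true_y)
## ---------------------------------------------------------------------------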
/scratch/gouwar.j/cran-all/cranData/AFFECT/R/data_generation.R
f1 <- function(x1){
  y <- 4*x1^2 + x1
  return(y)
}
/scratch/gouwar.j/cran-all/cranData/AFFECT/R/f1.R
f2 <- function(x2){
  y <- sin(6*x2)
  return(y)
}
/scratch/gouwar.j/cran-all/cranData/AFFECT/R/f2.R
f3 <- function(x3){
  y <- cos(6*x3) - 1
  return(y)
}
/scratch/gouwar.j/cran-all/cranData/AFFECT/R/f3.R
f4 <- function(x4){
  y <- 4*x4^3 + x4^2
  return(y)
}
/scratch/gouwar.j/cran-all/cranData/AFFECT/R/f4.R
calculation <- function(x){
  values <- x - mean(x)
  return(values)
}
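## Illustrative usage (not part of the package): `calculation` centers a
## numeric vector around its mean.
# calculation(c(1, 2, 3))        # -1 0 1
# mean(calculation(rnorm(10)))   # ~ 0 up to floating point error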
/scratch/gouwar.j/cran-all/cranData/AFFECT/R/matrix_value.R
#' @title Correction of Measurement Error in Survival Time and Censoring Status
#'
#' @description This function aims to correct for measurement error in the survival time and
#' misclassification in the censoring status. The key strategy in the function \code{ME_correction} includes regression
#' calibration for the survival time under additive measurement error models and the unbiased conditional expectation
#' approach for the censoring status under misclassification models. With the parameters of the measurement error
#' models implemented, this function outputs the corrected survival time and censoring status.
#'
#' @param pi_01 Misclassification probability P(Observed Censoring Status = 0 | Actual Censoring Status = 1).
#'
#' @param pi_10 Misclassification probability P(Observed Censoring Status = 1 | Actual Censoring Status = 0).
#'
#' @param gamma0 A scalar that links the observed survival time and the true survival time in the classical additive
#' measurement error model \code{y*=y+gamma0+gamma1*X+v}, where \code{y*}
#' is the observed survival time, \code{y} is the true survival time, \code{X} is the covariates and \code{v} is the noise
#' term.
#' @param gamma1 A \code{p}-dimensional vector of parameters in the additive
#' measurement error model \code{y*=y+gamma0+gamma1*X+v}, where \code{y*} is the observed
#' survival time, \code{y} is the true survival time, \code{X} is the covariates and \code{v} is the
#' noise term.
#'
#' @param indicator A \code{n}-dimensional vector of the misclassified censoring status, such as the second column generated
#' by the function \code{data_gen}.
#'
#' @param yast A \code{n}-dimensional vector of the error-prone survival time, such as the first column
#' generated by the function \code{data_gen}.
#'
#' @param cor_covar A \code{c(p,p)} covariance matrix of the \code{p}-dimensional vector of covariates.
#'
#' @param covariate A \code{c(n,p)} matrix of covariates.
#'
#' @return correction_data A \code{c(n,2)} data frame. The first column is the corrected survival time, and the
#' second column is the corrected censoring indicator.
#'
#' @examples
#' ## Generate data with misclassification probability 0.9, n = 500,
#' ## p = 50 and noise variance 0.75. The y* is related
#' ## to the first covariate.
#'
#' a <- matrix(0, ncol = 50, nrow = 1); a[1,1] <- 1
#' data <- data_gen(n=500, p=50, pi_01 = 0.9, pi_10 = 0.9,
#'                  gamma0=1, gamma1=a, e_var=0.75)
#'
#' ## Assume that the covariates are independent,
#' ## the observed survival time is related to the first covariate with
#' ## weight 1, the scalar in the classical additive
#' ## measurement error model is 1, and the misclassification probability is 0.9.
#'
#' matrixa <- diag(50)
#' gamma_0 <- 1; gamma_1 <- matrix(0, ncol = 50, nrow = 1); gamma_1[1,1] <- 1
#' corrected_data1 <- ME_correction(pi_10=0.9, pi_01=0.9, gamma0 = gamma_0,
#'                                  gamma1 = gamma_1,
#'                                  cor_covar = matrixa, yast = data[,1],
#'                                  indicator = data[,2], covariate = data[,3:52])
#'
#' @export

ME_correction <- function(pi_10, pi_01, gamma0, gamma1, cor_covar, indicator, yast, covariate){
  calculation <- function(x){
    values <- x - mean(x)
    return(values)
  }
  y <- yast
  cor_covarw <- matrix(1, ncol = dim(covariate)[2], nrow = 1)
  matrixa <- cor_covarw %*% cor_covar
  co_mean <- as.data.frame(apply(covariate, 2, calculation))
  correction_last_part <- c(matrixa %*% t(as.matrix(covariate)))
  mean_of_covariates <- apply(covariate, 2, mean)
  estimated_w <- c(gamma0 + gamma1 %*% as.matrix(mean_of_covariates))

  ## regression calibration for the survival time
  y_hat <- (y - correction_last_part - estimated_w)
  y_hat <- data.frame(y_hat)
  colnames(y_hat) <- c("y_hat")

  ## unbiased conditional expectation correction for the censoring status
  indicator_hat_probability <- (indicator - pi_10)/(1 - pi_10 - pi_01)
  indicator_hat <- NULL
  for (i in c(1:length(indicator_hat_probability))){
    if (indicator_hat_probability[i] < 0){
      indicator_hat[i] <- 0
    } else {
      indicator_hat[i] <- 1
    }
  }
  correction_data <- cbind(y_hat, indicator_hat)
  colnames(correction_data) <- c('corrected failure time', 'corrected censoring indicator')
  return(correction_data)
}
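## ---------------------------------------------------------------------------
## Illustrative sketch (not part of the package): a self-contained simulation
## check of the unbiasedness of the censoring-status correction
## (indicator - pi_10) / (1 - pi_10 - pi_01) used above. Names are hypothetical.
set.seed(2)
n <- 1e5
pi_10 <- 0.1; pi_01 <- 0.2          # requires pi_10 + pi_01 != 1
delta <- rbinom(n, 1, 0.6)          # true censoring status
## misclassify: report 1 with prob pi_10 when delta = 0, and with prob
## 1 - pi_01 when delta = 1
delta_star <- ifelse(delta == 1,
                     rbinom(n, 1, 1 - pi_01),
                     rbinom(n, 1, pi_10))
corrected <- (delta_star - pi_10) / (1 - pi_10 - pi_01)
## E[corrected | delta] = delta, so the group means recover 1 and 0:
tapply(corrected, delta, mean)
## ---------------------------------------------------------------------------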
/scratch/gouwar.j/cran-all/cranData/AFFECT/R/measurement_error_correction.R
interval <- function(lower_bound, upper_bound){
  interval_size <- (upper_bound - lower_bound)/6
  interval_points <- c(lower_bound)
  for (i in (1:6)){
    interval_points <- c(interval_points, lower_bound + interval_size*i)
  }
  return(interval_points)
}
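## Illustrative usage (not part of the package): `interval` returns the 7
## equally spaced endpoints that split [lower_bound, upper_bound] into the
## 6 subintervals used for a Riemann-sum approximation.
# interval(0, 12)   # 0 2 4 6 8 10 12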
/scratch/gouwar.j/cran-all/cranData/AFFECT/R/riemann_interval.R
require("fftwtools") require("pracma") require("data.table") require("gstat") require(sp) require(rgl) #require(reshape2) setOldClass("mesh3d") #' @title AFM image Power Spectrum Density analysis class #' #' @description \code{AFMImage3DModelAnalysis} #' #' @slot f1 a face of the 3D model #' @slot f2 a face of the 3D model #' @slot f3 a face of the 3D model #' @slot f4 a face of the 3D model #' @name AFMImage3DModelAnalysis-class #' @rdname AFMImage3DModelAnalysis-class #' @author M.Beauvais AFMImage3DModelAnalysis<-setClass("AFMImage3DModelAnalysis", slots = c( f1="mesh3d", f2="mesh3d", f3="mesh3d", f4="mesh3d", updateProgress="function"), validity = function(object) { return(TRUE) }) #' Display a 3D image of an AFMImage and store it on disk. #' #' Display a 3D image of an AFMImage and store it on disk if fullfilename variable is set. #' It uses the \code{\link{rgl}} package. #' #' @param AFMImage the AFM image to be displayed in three dimensions. #' @param fullfilename (optional) the directory and filename to save the png of the 3D image. If this variable is missing, the function will not save on disk the 3D image. #' @param width (optional) width of the image. Default is 512 pixels. Note: width can't be superior to screen resolution. #' @param changeViewpoint (optional) if TRUE, the viewpoint is changed. Default is TRUE. #' @param noLight if TRUE, the ligth is set off #' @author M.Beauvais #' @export displayIn3D<- function(AFMImage, width, fullfilename, changeViewpoint, noLight) { if(missing(width)){ width <- 512 } if(missing(fullfilename)){ save <- FALSE }else{ save <- TRUE } if(missing(changeViewpoint)){ changeViewpoint<-TRUE } # respect the proportion between horizontal / vertical distance and heigth newHeights <- (AFMImage@data$h)*(AFMImage@samplesperline)/(AFMImage@scansize) newHeights <- (AFMImage@data$h) minH<-min(newHeights) # TODO check validity of created image instead if(!is.na(minH)) { newH<-(newHeights-minH) y<-matrix(newH, nrow = AFMImage@lines, ncol = AFMImage@samplesperline) #z <- seq(ncol(y),1,by=-1) z <- seq(1,ncol(y),by=1) x <- (1:nrow(y)) ylim <- range(y) ylen <- ylim[2] - ylim[1] + 1 print(ylen) colorlut <- heat.colors(ylen, alpha = 1) # height color lookup table if (length(colorlut)==2) { colorlut <-c("#FFFFFFFF","#FFFF00FF") }else{ colorlut <- heat.colors(ylen, alpha = c(0,rep(1,length(colorlut)-1))) # height color lookup table } #print(colorlut) col <- colorlut[ y-ylim[1]+1 ] # assign colors to heights rgl.open() par3d(windowRect = 100 + c( 0, 0, width, width )) rgl.bg(color = c("white"), alpha=c(0.0), back = "lines") bboxylen=3 if(ylim[2]<60) bboxylen=2 #print(col) #print(c(0.0,rep(1.0,length(col)-1))) rgl.bbox(color = c("#333333", "black"), emission = "#333333", specular = "#111111", shininess = 0, alpha = 0.0, xlen=0, zlen=0, ylen=bboxylen ) rgl.surface(x, z, y, color=col, alpha=c(0.0,rep(1.0,length(col)-1)), back="lines") if (changeViewpoint) { i<-180 rotate3d(c(2, 0, 0), pi/2, 0, 1, 0) rgl.viewpoint(-i/2,i/2,zoom=0.5) } if (noLight) { rgl.clear( type = "lights" ) clear3d( type = c("lights")) rgl.light( theta = 0, phi = 0, viewpoint.rel = TRUE, ambient = "#FFFFFF", diffuse = "#FFFFFF", specular = "#000000") } if (save) { print(paste("saving", basename(fullfilename))) rgl.snapshot(fullfilename) } return(TRUE) } return(FALSE) } #' Display a 3D image of the holes in an AFMImage and store it on disk. #' #' Display a 3D image of the holes in an AFMImage and store it on disk if fullfilename variable is set. #' It uses the \code{\link{rgl}} package. 
#' #' @param AFMImage the AFM image to be displayed in three dimensions. #' @param fullfilename (optional) the directory and filename to save the png of the 3D image. If this variable is missing, the function will not save on disk the 3D image. #' @param width (optional) width of the image. Default is 512 pixels. Note: width can't be superior to screen resolution. #' @param changeViewpoint (optional) if TRUE, the viewpoint is changed. Default is TRUE. #' @param noLight if TRUE, the ligth is set off #' @author M.Beauvais #' @export displayHolesIn3D<- function(AFMImage, width, fullfilename, changeViewpoint, noLight) { invertBinaryAFMImage<-invertBinaryAFMImage(AFMImage) displayIn3D(AFMImage=invertBinaryAFMImage, width=width, fullfilename=fullfilename, changeViewpoint=changeViewpoint, noLight=noLight) } #' Calculate the 3D model for 3D printing #' #' \code{calculate3DModel} update \code{\link{AFMImage3DModelAnalysis}} #' #' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy #' @param AFMImage3DModelAnalysis n \code{\link{AFMImage3DModelAnalysis}} to store the setup and results of PSD analysis #' #' @name calculate3DModel #' @rdname calculate3DModel-methods #' @exportMethod calculate3DModel #' @author M.Beauvais setGeneric(name= "calculate3DModel", def= function(AFMImage3DModelAnalysis, AFMImage) { return(standardGeneric("calculate3DModel")) }) #' @rdname calculate3DModel-methods #' @aliases calculate3DModel,AFMImage-method setMethod(f="calculate3DModel", "AFMImage3DModelAnalysis", definition= function(AFMImage3DModelAnalysis, AFMImage) { print(paste("exporting to stl format ")) baseThickness<-2 # respect the proportion between horizontal / vertical distance and heigth newHeights <- (AFMImage@data$h)*(AFMImage@samplesperline)/(AFMImage@scansize) minH<-min(newHeights) #print(paste("minH", minH)) if (minH<0) { newH<-(newHeights-minH+baseThickness) } else { newH<-(newHeights-minH+5) } #print(paste("min(newH)", min(newH))) #print(paste("max(newH)", max(newH))) totalLength<-4 counter<-0 if (!is.null(AFMImage3DModelAnalysis@updateProgress)&& is.function(AFMImage3DModelAnalysis@updateProgress)&& !is.null(AFMImage3DModelAnalysis@updateProgress())) { text <- paste0("starting ", totalLength, " calculations") #AFMImage3DModelAnalysis@updateProgress(message="Calculating 3D faces", value=0) AFMImage3DModelAnalysis@updateProgress(value= 0, detail = text) counter<-counter+1 value<-counter / totalLength text <- paste0(round(counter, 2),"/",totalLength) AFMImage3DModelAnalysis@updateProgress(value= value, detail = text) print("update") }else{ print("no GUI update") print(is.null(AFMImage3DModelAnalysis@updateProgress)) print(is.function(AFMImage3DModelAnalysis@updateProgress)) print(is.null(AFMImage3DModelAnalysis@updateProgress())) } #face 1 x1<-seq(1:AFMImage@lines) y1<-rep(rep(1, each = AFMImage@lines) , each=1) z1<-newH[x1+(y1-1)*AFMImage @samplesperline] x1=c(x1,x1[length(x1)]) y1=c(y1,1) z1=c(z1,1) x1=c(x1,1) y1=c(y1,1) z1=c(z1,1) x1=c(x1,x1[1]) y1=c(y1,y1[1]) z1=c(z1,z1[1]) # print(length(x1)) # print(length(y1)) # print(length(z1)) # print(x1) # print(y1) # print(z1) f1<-polygon3d(x1, z1, y1, col = "red", plot=FALSE, fill=TRUE) f1<-rotate3d( f1 , -pi/2, 1, 0, 0 ) f1<-translate3d( f1 , 0, AFMImage@samplesperline+1, 0 ) #face 2 if (!is.null(AFMImage3DModelAnalysis@updateProgress)&& is.function(AFMImage3DModelAnalysis@updateProgress)&& !is.null(AFMImage3DModelAnalysis@updateProgress())) { counter<-counter+1 value<-counter / totalLength text <- paste0(round(counter, 2),"/",totalLength) 
AFMImage3DModelAnalysis@updateProgress(value= value, detail = text) } y1<-rep(AFMImage@lines, each = AFMImage@lines) x1<-seq(1:AFMImage@lines) z1<-newH[x1+(y1-1)*AFMImage@samplesperline] x1=c(x1,x1[length(x1)]) y1=c(y1,y1[1]) z1=c(z1,1) x1=c(x1,1) y1=c(y1,y1[1]) z1=c(z1,1) x1=c(x1,x1[1]) y1=c(y1,y1[1]) z1=c(z1,z1[1]) # print(length(x1)) # print(length(y1)) # print(length(z1)) # print(x1) # print(y1) # print(z1) y1<-as.numeric(y1) # z1<-z1+rnorm(1:length(z1)) f2<-polygon3d(x1, z1, y1, col = "blue", plot=FALSE, fill=TRUE) f2<-rotate3d( f2 , -pi/2, 1, 0, 0 ) f2<-translate3d( f2 , 0, AFMImage@lines+1, 0 ) #face 3 if (!is.null(AFMImage3DModelAnalysis@updateProgress)&& is.function(AFMImage3DModelAnalysis@updateProgress)&& !is.null(AFMImage3DModelAnalysis@updateProgress())) { counter<-counter+1 value<-counter / totalLength text <- paste0(round(counter, 2),"/",totalLength) AFMImage3DModelAnalysis@updateProgress(value= value, detail = text) } y1<-seq(1:AFMImage@samplesperline) x1<-rep(1, times = (AFMImage@samplesperline)) z1<-rev(newH[x1+(y1-1)*AFMImage@samplesperline]) x1=c(x1,x1[length(x1)]) y1=c(y1,y1[length(y1)]) z1=c(z1,1) x1=c(x1,x1[1]) y1=c(y1,y1[1]) z1=c(z1,1) x1=c(x1,x1[1]) y1=c(y1,y1[1]) z1=c(z1,z1[1]) # print(x1) # print(y1) # print(z1) f3<-polygon3d(y1, z1, x1, col = "red", plot=FALSE, fill=TRUE) f3<-rotate3d( f3 , -pi/2, 1, 0, 0 ) f3<-rotate3d( f3 , -pi/2, 0, 0, 1 ) f3<-translate3d( f3 , 0, 0, 0 ) #face 4 if (!is.null(AFMImage3DModelAnalysis@updateProgress)&& is.function(AFMImage3DModelAnalysis@updateProgress)&& !is.null(AFMImage3DModelAnalysis@updateProgress())) { counter<-counter+1 value<-counter / totalLength text <- paste0(round(counter, 2),"/",totalLength) AFMImage3DModelAnalysis@updateProgress(value= value, detail = text) } y1<-seq(1:AFMImage@samplesperline) x1<-rep(AFMImage@lines, times = AFMImage@samplesperline) z1<-rev(newH[x1+(y1-1)*AFMImage@samplesperline]) x1=c(x1,x1[length(x1)]) y1=c(y1,y1[length(y1)]) z1=c(z1,1) x1=c(x1,x1[1]) y1=c(y1,y1[1]) z1=c(z1,1) x1=c(x1,x1[1]) y1=c(y1,y1[1]) z1=c(z1,z1[1]) f4<-polygon3d(y1, z1, x1, col = "red", plot=FALSE, fill=TRUE) f4<-rotate3d( f4 , -pi/2, 1, 0, 0 ) f4<-rotate3d( f4 , -pi/2, 0, 0, 1 ) AFMImage3DModelAnalysis@f1<-f1 AFMImage3DModelAnalysis@f2<-f2 AFMImage3DModelAnalysis@f3<-f3 AFMImage3DModelAnalysis@f4<-f4 return(AFMImage3DModelAnalysis) }) #' Export an AFM Image as a STL format file. #' #' Export an \code{\link{AFMImage}} as a STL format file thanks to the \code{\link{rgl}} package. The STL file can be used as an input for a 3D printing software tool.\cr\cr #' exportToSTL is compatible with slicr (http://slic3r.org) version 1.2.9 (GPL v3 licence).\cr #' In order to 3D print the AFM Image with slic3r, do as following: #' \itemize{ #' \item Use "File> Repair STL file..." menu option to create a file with the obj extension. #' \item Use "Add" button below the menu to display your AFM Image on the print board #' \item Right click on your AFM image. 
Use "Scale> uniformely" option, Set "15%" for your AFM image to fit your printing board #' } #' @param AFMImage3DModelAnalysis an \code{\link{AFMImage3DModelAnalysis}} #' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy #' @param stlfullfilename directory and filename to save as a stl file #' @author M.Beauvais #' @export #' @examples #' \dontrun{ #' library(AFM) #' data("AFMImageOfRegularPeaks") #' AFMImage<-AFMImageOfRegularPeaks #' # calculate the 3D model : surface and the faces #' AFMImage3DModelAnalysis<-new ("AFMImage3DModelAnalysis") #' AFMImage3DModelAnalysis<-calculate3DModel(AFMImage3DModelAnalysis= AFMImage3DModelAnalysis, #' AFMImage= AFMImage) #' # export the 3D model to file #' exportDirectory=tempdir() #' print(paste("saving model in ", exportDirectory)) #' exportToSTL(AFMImage3DModelAnalysis=AFMImage3DModelAnalysis, #' AFMImage=AFMImage, #' stlfullfilename=paste(exportDirectory, "myFile.stl", sep="/")) #' } exportToSTL<- function(AFMImage3DModelAnalysis, AFMImage, stlfullfilename) { print(paste("exporting to stl format ", basename(stlfullfilename) )) baseThickness<-2 #AFMImage3DModelAnalysis<-calculate3DModel(AFMImage3DModelAnalysis= AFMImage3DModelAnalysis, AFMImage= AFMImage) # respect the proportion between horizontal / vertical distance and heigth newHeights <- (AFMImage@data$h)*(AFMImage@samplesperline)/(AFMImage@scansize) minH<-min(newHeights) #print(paste("minH", minH)) if (minH<0) { newH<-(newHeights-minH+baseThickness) } else { newH<-(newHeights-minH+5) } #print(paste("min(newH)", min(newH))) #print(paste("max(newH)", max(newH))) # surface z<-matrix(newH,nrow = AFMImage@lines,ncol = AFMImage@samplesperline) x <- (1:nrow(z)) y <- seq(ncol(z),1,by=-1) zlim <- range(z) zlen <- zlim[2] - zlim[1] + 1 colorlut <- heat.colors(zlen) col <- colorlut[ z-zlim[1]+1 ] rgl.open() par3d(windowRect = c(100,100,800,800)) shade3d(AFMImage3DModelAnalysis@f1) shade3d(AFMImage3DModelAnalysis@f2) shade3d(AFMImage3DModelAnalysis@f3) shade3d(AFMImage3DModelAnalysis@f4) terrain3d(x, y, z, color=col, front="lines", back="lines") # create a stl file print(paste("saving", basename(stlfullfilename))) writeSTL(stlfullfilename) print("done") } getHorizontalSlice<-function(AFMImage, levelMin, levelMax, width, fullfilename) { if(missing(width)){ width <- 512 } if(missing(fullfilename)){ save <- FALSE }else{ save <- TRUE print(fullfilename) } print("getHorizontalSlice") print(paste("levelMin=", levelMin, "levelMax= ",levelMax)) heights<-AFMImage@data$h minH<-min(heights) # print(tail(heights, n=10)) print(paste("min=", min(heights), "max= ",max(heights), "mean=", mean(heights))) indexfmin<-which( heights < levelMin | heights > levelMax) print(head(indexfmin, n=10)) print(length(indexfmin)) minH<-min(AFMImage@data$h) newH<-(AFMImage@data$h-minH) y<-matrix(newH,nrow = AFMImage@lines,ncol = AFMImage@samplesperline) x <- (1:nrow(y)) z <- (1:ncol(y)) ylim <- range(y) ylen <- ylim[2] - ylim[1] + 1 colorlut <- heat.colors(ylen) # height color lookup table col <- colorlut[ y-ylim[1]+1 ] # assign colors to heights oldMinH<-min(newH) AFMImage@data$h[indexfmin]=NA print("hhhhh") print(head(indexfmin, n=1)) AFMImage@data$h[head(indexfmin, n=1)]=minH newH<-AFMImage@data$h y<-matrix(newH,nrow = AFMImage@lines,ncol = AFMImage@samplesperline) rgl.open() par3d(windowRect = 100 + c( 0, 0, width, width ) ) rgl.bg(color = c("white"), back = "lines") bboxylen=3 if(ylim[2]<60) bboxylen=2 rgl.bbox(color = c("#333333", "black"), emission = "#333333", specular = "#111111", shininess = 0, alpha = 0.6, 
xlen=0, zlen=0, ylen=bboxylen ) rgl.surface(x, z, y, color=col, back="lines") i<-130 rgl.viewpoint(i,i/4,zoom=1.1) if (save) { print(paste("saving", basename(fullfilename))) rgl.snapshot(fullfilename) } } saveHorizontalSlices<-function(AFMImage, numberOfSlices, width, fullfilename) { heights<-AFMImage@data$h print(paste("min=", min(heights), "max= ",max(heights), "mean=", mean(heights))) minH=min(heights) maxH=max(heights) sliceHeight = ceil((maxH- minH)/numberOfSlices) sliceIndex = 0 for(i in seq(floor(minH), ceil(maxH), by=sliceHeight)) { sliceIndex<-sliceIndex+1 sliceName<-sliceIndex if (numberOfSlices>9) { if (sliceIndex<10) { sliceName<-paste("0", sliceIndex, sep="") } } newfullfilename = paste(fullfilename, "3D-horizontal-slice",sliceName, "png", sep=".") print(newfullfilename) getHorizontalSlice(AFMImage, i, i + sliceHeight, width, newfullfilename ) #getHorizontalSlice(AFMImage, i, i + sliceHeight, width) } } #' get 3D image full filename #' #' @param exportDirectory a diretcory to export image #' @param imageName the image name #' @author M.Beauvais #' @export get3DImageFullfilename<-function(exportDirectory, imageName) { fullfilename<-paste(exportDirectory, paste(imageName,"3D.png",sep="."), sep="/") return(fullfilename) }
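## ---------------------------------------------------------------------------
## Illustrative sketch (not part of the package): rendering a bundled sample
## image and exporting it for 3D printing, combining the exported functions
## above. Assumes the AFM package and its sample data are installed; commented
## out because it opens rgl windows and writes to disk.
# library(AFM)
# data("AFMImageOfRegularPeaks")
# displayIn3D(AFMImageOfRegularPeaks, width = 512, noLight = TRUE)
# m <- new("AFMImage3DModelAnalysis")
# m <- calculate3DModel(AFMImage3DModelAnalysis = m,
#                       AFMImage = AFMImageOfRegularPeaks)
# exportToSTL(AFMImage3DModelAnalysis = m, AFMImage = AFMImageOfRegularPeaks,
#             stlfullfilename = file.path(tempdir(), "peaks.stl"))
## ---------------------------------------------------------------------------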
/scratch/gouwar.j/cran-all/cranData/AFM/R/AFM3DPrinter.R
require("fftwtools") require("pracma") require("data.table") require("gstat") require(sp) require("stringr") require(fractaldim) #require(reshape2) #' @title AFM image fractal dimension method class #' #' @description \code{AFMImageFractalDimensionMethod} stores calculation from one fractal dimension method #' #' @slot fd_method Two dimensional function names used to evaluate the fractal dimension and fractal scale #' @slot fd the value of the fractal dimension #' @slot fd_scale the value of the fractal scale #' @name AFMImageFractalDimensionMethod-class #' @rdname AFMImageFractalDimensionMethod-class #' @author M.Beauvais #' @seealso \code{\link{fractaldim}} AFMImageFractalDimensionMethod<-setClass("AFMImageFractalDimensionMethod", slots = c(fd_method="character", fd="numeric", fd_scale="numeric")) #' Constructor method of AFMImageFractalDimensionMethod Class. #' #' @param .Object an AFMImageFractalDimensionMethod object #' @param fd_method Two dimensional function names used to evaluate the fractal dimension and fractal scale #' @param fd the value of the fractal dimension #' @param fd_scale the value of the fractal scale #' @rdname AFMImageFractalDimensionMethod-class #' @export setMethod(f= "initialize", signature= "AFMImageFractalDimensionMethod", definition= function(.Object, fd_method, fd, fd_scale) { .Object@fd_method<-fd_method .Object@fd<-fd .Object@fd_scale<-fd_scale validObject(.Object) return(.Object) }) #' Wrapper function AFMImageFractalDimensionMethod #' #' @rdname AFMImageFractalDimensionMethod-class #' @export AFMImageFractalDimensionMethod <- function(fd_method, fd, fd_scale) { return(new("AFMImageFractalDimensionMethod", fd_method=fd_method, fd=fd, fd_scale=fd_scale)) } #' AFM image fractal dimensions analysis class #' #' A S4 class to handle the fractal dimension calculation with several fractal dimension methods #' #' @slot fractalDimensionMethods a list of \code{\link{AFMImageFractalDimensionMethod}} #' @slot csvFullfilename To be removed ? #' @slot updateProgress a function to update a graphical user interface #' @name AFMImageFractalDimensionsAnalysis-class #' @rdname AFMImageFractalDimensionsAnalysis-class #' @exportClass AFMImageFractalDimensionsAnalysis #' @author M.Beauvais #' AFMImageFractalDimensionsAnalysis<-setClass("AFMImageFractalDimensionsAnalysis", slots = c(fractalDimensionMethods="list", csvFullfilename="character", updateProgress="function")) #' Constructor method of AFMImageFractalDimensionsAnalysis Class. #' #' @param .Object an AFMImageFractalDimensionsAnalysis Class #' @param fractalDimensionMethods a list of \code{\link{AFMImageFractalDimensionMethod}} #' @param csvFullfilename To be removed ? 
#' @rdname AFMImageFractalDimensionsAnalysis-class #' @export setMethod("initialize", "AFMImageFractalDimensionsAnalysis", function(.Object, fractalDimensionMethods, csvFullfilename) { if(!missing(fractalDimensionMethods)) .Object@fractalDimensionMethods<-fractalDimensionMethods if(!missing(csvFullfilename)) .Object@csvFullfilename<-csvFullfilename validObject(.Object) return(.Object) }) #' Wrapper function AFMImageFractalDimensionsAnalysis #' #' @rdname AFMImageFractalDimensionsAnalysis-class #' @export AFMImageFractalDimensionsAnalysis <- function() { return(new("AFMImageFractalDimensionsAnalysis")) } #' Method \code{fractalDimensionMethods} returns a list of FractalDimensionMethod objects #' @name AFMImageFractalDimensionsAnalysis-class #' @rdname AFMImageFractalDimensionsAnalysis-class #' setGeneric("fractalDimensionMethods",function(object){standardGeneric("fractalDimensionMethods")}) setGeneric(name= "fractalDimensionMethods<-", def= function(AFMImageFractalDimensionsAnalysis, value) { return(standardGeneric("fractalDimensionMethods<-")) }) #' @rdname AFMImageFractalDimensionsAnalysis-class #' @aliases fractalDimensionMethods #' @param object a \code{\link{AFMImageFractalDimensionsAnalysis}} setMethod("fractalDimensionMethods",signature=signature(object='AFMImageFractalDimensionsAnalysis'), function(object) { return(object@fractalDimensionMethods) } ) setReplaceMethod(f="fractalDimensionMethods", signature(AFMImageFractalDimensionsAnalysis = "AFMImageFractalDimensionsAnalysis", value = "list"), definition= function(AFMImageFractalDimensionsAnalysis, value) { AFMImageFractalDimensionsAnalysis@fractalDimensionMethods <- value return(AFMImageFractalDimensionsAnalysis) }) #' Calculate 2D fractal dimensions and scales of an AFM Image #' #' \code{getFractalDimensions} calculates fractal dimensions and scales of an \code{\link{AFMImage}} with the fd.estim.method from the \code{\link{fractaldim}} package. #' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy #' @param AFMImageFractalDimensionsAnalysis an \code{\link{AFMImageFractalDimensionsAnalysis}} to store the results of the fractal analysis #' @return a list of \code{\link{AFMImageFractalDimensionMethod}} objects with the calculated fractal dimensions and scales #' @references Gneiting2012, Tilmann Gneiting, Hana Sevcikova and Donald B. Percival 'Estimators of Fractal Dimension: Assessing the Roughness of Time Series and Spatial Data - Statistics in statistical Science, 2012, Vol. 27, No. 
2, 247-277' #' @author M.Beauvais #' @rdname AFMFractalDimensionAnalyser-getFractalDimensions #' @seealso \code{\link{fractaldim}} #' @export #' @examples #' \dontrun{ #' library(AFM) #' data(AFMImageOfAluminiumInterface) #' print(getFractalDimensions(AFMImageOfAluminiumInterface)) #' } getFractalDimensions<-function(AFMImage, AFMImageFractalDimensionsAnalysis) { if (missing(AFMImageFractalDimensionsAnalysis)) { AFMImageFractalDimensionsAnalysis<-NULL } graphicalUpdate<-FALSE if (!is.null(AFMImageFractalDimensionsAnalysis)&& !is.null(AFMImageFractalDimensionsAnalysis@updateProgress)&& is.function(AFMImageFractalDimensionsAnalysis@updateProgress)&& !is.null(AFMImageFractalDimensionsAnalysis@updateProgress())) { graphicalUpdate<-TRUE } if (graphicalUpdate) { AFMImageFractalDimensionsAnalysis@updateProgress(message="1/2 - Calculating table", value=0) } totalLength <- 8 counter<-0 if (graphicalUpdate) { counter<-counter+1 value<-counter / totalLength text <- paste0(round(counter, 2),"/",totalLength) AFMImageFractalDimensionsAnalysis@updateProgress(value= 0, detail = text) } rf2d <- matrix(AFMImage@data$h, nrow=AFMImage@samplesperline) fullfilename<-AFMImage@fullfilename if (graphicalUpdate) { counter<-counter+1 value<-counter / totalLength text <- paste0(round(counter, 2),"/",totalLength) AFMImageFractalDimensionsAnalysis@updateProgress(value= 0, detail = text) } fd2d_transectvar <- fd.estim.transect.var(rf2d, p.index = 1, direction='hvd+d-') if (graphicalUpdate) { counter<-counter+1 value<-counter / totalLength text <- paste0(round(counter, 2),"/",totalLength) AFMImageFractalDimensionsAnalysis@updateProgress(value= 0, detail = text) } fd2d_transectincr1 <- fd.estim.transect.incr1(rf2d, p.index = 1, direction='hvd+d-') if (graphicalUpdate) { counter<-counter+1 value<-counter / totalLength text <- paste0(round(counter, 2),"/",totalLength) AFMImageFractalDimensionsAnalysis@updateProgress(value= 0, detail = text) } fd2d_isotropic <- fd.estim.isotropic(rf2d, p.index = 1, direction='hvd+d-') if (graphicalUpdate) { counter<-counter+1 value<-counter / totalLength text <- paste0(round(counter, 2),"/",totalLength) AFMImageFractalDimensionsAnalysis@updateProgress(value= 0, detail = text) } fd2d_squareincr <- fd.estim.squareincr(rf2d, p.index = 1) if (graphicalUpdate) { counter<-counter+1 value<-counter / totalLength text <- paste0(round(counter, 2),"/",totalLength) AFMImageFractalDimensionsAnalysis@updateProgress(value= 0, detail = text) } fd2d_filter1 <- fd.estim.filter1(rf2d, p.index = 1) if (graphicalUpdate) { counter<-counter+1 value<-counter / totalLength text <- paste0(round(counter, 2),"/",totalLength) AFMImageFractalDimensionsAnalysis@updateProgress(value= 0, detail = text) } res=c(new("AFMImageFractalDimensionMethod", fd_method = "isotropic", fd = fd2d_isotropic$fd , fd_scale = fd2d_isotropic$scale)) res=c(res, new("AFMImageFractalDimensionMethod", fd_method = "transectvar", fd = fd2d_transectvar$fd, fd_scale = fd2d_transectvar$scale)) res=c(res, new("AFMImageFractalDimensionMethod", fd_method = "transectincr1", fd = fd2d_transectincr1$fd, fd_scale = fd2d_transectincr1$scale)) res=c(res, new("AFMImageFractalDimensionMethod", fd_method = "squareincr", fd = fd2d_squareincr$fd, fd_scale = fd2d_squareincr$scale)) res=c(res, new("AFMImageFractalDimensionMethod", fd_method = "filter1", fd = fd2d_filter1$fd, fd_scale = fd2d_filter1$scale)) return(res) } exportFractalDimImagesForReport<-function(AFMImage, reportDirectory) { warning("Possible inconsistency between fractaldim images and values") 
sampleName<-basename(AFMImage@fullfilename) rf2d <- matrix(AFMImage@data$h, nrow=AFMImage@samplesperline) png(getFractalDimensionsPngFullfilename(reportDirectory, sampleName, "isotropic")) fd2d_isotropic <- fd.estim.isotropic(rf2d, p.index = 1, direction='hvd+d-', plot.loglog = TRUE, plot.allpoints = TRUE) dev.off() png(getFractalDimensionsPngFullfilename(reportDirectory, sampleName, "squareincr")) fd2d_squareincr <- fd.estim.squareincr(rf2d, p.index = 1, plot.loglog = TRUE, plot.allpoints = TRUE) dev.off() png(getFractalDimensionsPngFullfilename(reportDirectory, sampleName, "filter1")) fd2d_filter1 <- fd.estim.filter1(rf2d, p.index = 1, plot.loglog = TRUE, plot.allpoints = TRUE) dev.off() } getFractalDimensionsPngFullfilename<-function(reportDirectory, imagebasename, method) { return(paste(reportDirectory, paste(imagebasename, "-", method, "-fractaldim.png", sep=""), sep="/")) }
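## ---------------------------------------------------------------------------
## Illustrative sketch (not part of the package): what getFractalDimensions()
## does for a single estimator, applied to a synthetic height matrix instead
## of an AFMImage. Uses only fractaldim calls that already appear above.
# library(fractaldim)
# set.seed(3)
# rf2d <- matrix(rnorm(128 * 128), nrow = 128)   # stand-in for the heights
# fd <- fd.estim.isotropic(rf2d, p.index = 1, direction = 'hvd+d-')
# fd$fd      # estimated fractal dimension
# fd$scale   # estimated fractal scale
## ---------------------------------------------------------------------------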
/scratch/gouwar.j/cran-all/cranData/AFM/R/AFMFractalDimensionAnalyser.R
require("data.table") require("mixtools") # normality tests require(gridExtra) require(ggplot2) #if(getRversion() >= "3.1.0") utils::suppressForeignCheck(c("r", "roughness","x","predict.gstat")) #' @title AFM image Gaussian Mix analysis class #' #' @description \code{AFMImageGaussianMixAnalysis} handles an \code{\link{AFMImage}} Gaussian mix of heights analysis #' #' @slot minGaussianMix the minimum number of components to calculate #' @slot maxGaussianMix the maximum number of components to calculate #' @slot epsilonGaussianMix the convergence criterion #' @slot gaussianMix a data.table to store the calculated Gaussian mixes #' @slot summaryMixture a data.table to summaryse the mixtures #' @slot tcdfsEcdfsCheck an array to store the points to draw tcdfs ecdfs check #' @slot densityCurvesAllHeights an array to store the points to draw the density curves #' @slot eachComponentsCounts an array to store the points to draw counts of each components #' @slot updateProgress a function to update a graphical user interface #' @name AFMImageGaussianMixAnalysis-class #' @rdname AFMImageGaussianMixAnalysis-class #' @author M.Beauvais AFMImageGaussianMixAnalysis<-setClass("AFMImageGaussianMixAnalysis", slots = c( minGaussianMix="numeric", maxGaussianMix="numeric", epsilonGaussianMix="numeric", gaussianMix="array", summaryMixture="data.table", tcdfsEcdfsCheck="array", densityCurvesAllHeights="array", eachComponentsCounts="array", updateProgress="function"), validity = function(object) { return(TRUE) } ) #' Constructor method of AFMImageGaussianMixAnalysis Class. #' #' @param .Object an AFMImageGaussianMixAnalysis object #' @rdname AFMImageGaussianMixAnalysis-class #' @export setMethod("initialize", "AFMImageGaussianMixAnalysis", function(.Object) { .Object@minGaussianMix<-2 .Object@maxGaussianMix<-2 .Object@epsilonGaussianMix<-1e-4 .Object@gaussianMix<-array() .Object@summaryMixture<-data.table() .Object@tcdfsEcdfsCheck<-array() .Object@densityCurvesAllHeights<-array() .Object@eachComponentsCounts<-array() validObject(.Object) ## valide l'objet return(.Object) }) #' Wrapper function AFMImageGaussianMixAnalysis #' #' @rdname AFMImageGaussianMixAnalysis-class #' @export AFMImageGaussianMixAnalysis <- function() { return(new("AFMImageGaussianMixAnalysis")) } #' Method \code{eachComponentsCounts} returns a data.table of Gaussian mixes #' @name AFMImageGaussianMixAnalysis-class #' @rdname AFMImageGaussianMixAnalysis-class setGeneric("summaryMixture",function(object){standardGeneric("summaryMixture")}) setGeneric(name= "summaryMixture<-", def= function(AFMImageGaussianMixAnalysis, value) { return(standardGeneric("summaryMixture<-")) }) #' @rdname AFMImageGaussianMixAnalysis-class #' @aliases summaryMixture #' @param object a \code{\link{AFMImageGaussianMixAnalysis}} setMethod("summaryMixture",signature=signature(object='AFMImageGaussianMixAnalysis'), function(object) { return(object@summaryMixture) } ) setReplaceMethod(f="summaryMixture", signature(AFMImageGaussianMixAnalysis = "AFMImageGaussianMixAnalysis", value = "data.table"), definition= function(AFMImageGaussianMixAnalysis, value) { AFMImageGaussianMixAnalysis@summaryMixture <- value return(AFMImageGaussianMixAnalysis) }) #' Method \code{eachComponentsCounts} returns a data.table of Gaussian mixes #' @name AFMImageGaussianMixAnalysis-class #' @rdname AFMImageGaussianMixAnalysis-class setGeneric("eachComponentsCounts",function(object){standardGeneric("eachComponentsCounts")}) setGeneric(name= "eachComponentsCounts<-", def= 
function(AFMImageGaussianMixAnalysis, value) { return(standardGeneric("eachComponentsCounts<-")) }) #' @rdname AFMImageGaussianMixAnalysis-class #' @aliases eachComponentsCounts setMethod("eachComponentsCounts",signature=signature(object='AFMImageGaussianMixAnalysis'), function(object) { return(object@eachComponentsCounts) } ) setReplaceMethod(f="eachComponentsCounts", signature(AFMImageGaussianMixAnalysis = "AFMImageGaussianMixAnalysis", value = "array"), definition= function(AFMImageGaussianMixAnalysis, value) { AFMImageGaussianMixAnalysis@eachComponentsCounts <- value return(AFMImageGaussianMixAnalysis) }) #' Method \code{tcdfsEcdfsCheck} returns a data.table of Gaussian mixes #' @name AFMImageGaussianMixAnalysis-class #' @rdname AFMImageGaussianMixAnalysis-class setGeneric("tcdfsEcdfsCheck",function(object){standardGeneric("tcdfsEcdfsCheck")}) setGeneric(name= "tcdfsEcdfsCheck<-", def= function(AFMImageGaussianMixAnalysis, value) { return(standardGeneric("tcdfsEcdfsCheck<-")) }) #' Method \code{densityCurvesAllHeights} returns a data.table of Gaussian mixes #' @name AFMImageGaussianMixAnalysis-class #' @rdname AFMImageGaussianMixAnalysis-class setGeneric("densityCurvesAllHeights",function(object){standardGeneric("densityCurvesAllHeights")}) setGeneric(name= "densityCurvesAllHeights<-", def= function(AFMImageGaussianMixAnalysis, value) { return(standardGeneric("densityCurvesAllHeights<-")) }) #' @rdname AFMImageGaussianMixAnalysis-class #' @aliases densityCurvesAllHeights setMethod("densityCurvesAllHeights",signature=signature(object='AFMImageGaussianMixAnalysis'), function(object) { return(object@densityCurvesAllHeights) } ) setReplaceMethod(f="densityCurvesAllHeights", signature(AFMImageGaussianMixAnalysis = "AFMImageGaussianMixAnalysis", value = "array"), definition= function(AFMImageGaussianMixAnalysis, value) { AFMImageGaussianMixAnalysis@densityCurvesAllHeights <- value return(AFMImageGaussianMixAnalysis) }) #' Method \code{tcdfsEcdfsCheck} returns a data.table of Gaussian mixes #' @name AFMImageGaussianMixAnalysis-class #' @rdname AFMImageGaussianMixAnalysis-class setGeneric("tcdfsEcdfsCheck",function(object){standardGeneric("tcdfsEcdfsCheck")}) setGeneric(name= "tcdfsEcdfsCheck<-", def= function(AFMImageGaussianMixAnalysis, value) { return(standardGeneric("tcdfsEcdfsCheck<-")) }) #' @rdname AFMImageGaussianMixAnalysis-class #' @aliases tcdfsEcdfsCheck setMethod("tcdfsEcdfsCheck",signature=signature(object='AFMImageGaussianMixAnalysis'), function(object) { return(object@tcdfsEcdfsCheck) } ) setReplaceMethod(f="tcdfsEcdfsCheck", signature(AFMImageGaussianMixAnalysis = "AFMImageGaussianMixAnalysis", value = "array"), definition= function(AFMImageGaussianMixAnalysis, value) { AFMImageGaussianMixAnalysis@tcdfsEcdfsCheck <- value return(AFMImageGaussianMixAnalysis) }) #' Method \code{GaussianMix} returns a data.table of Gaussian mixes #' @name AFMImageGaussianMixAnalysis-class #' @rdname AFMImageGaussianMixAnalysis-class setGeneric("gaussianMix",function(object){standardGeneric("gaussianMix")}) setGeneric(name= "gaussianMix<-", def= function(AFMImageGaussianMixAnalysis, value) { return(standardGeneric("gaussianMix<-")) }) #' @rdname AFMImageGaussianMixAnalysis-class #' @aliases gaussianMix setMethod("gaussianMix",signature=signature(object='AFMImageGaussianMixAnalysis'), function(object) { return(object@gaussianMix) } ) setReplaceMethod(f="gaussianMix", signature(AFMImageGaussianMixAnalysis = "AFMImageGaussianMixAnalysis", value = "array"), definition= 
function(AFMImageGaussianMixAnalysis, value) { AFMImageGaussianMixAnalysis@gaussianMix <- value return(AFMImageGaussianMixAnalysis) }) #' Method \code{minGaussianMix} returns a data.table of Gaussian mixes #' @name AFMImageGaussianMixAnalysis-class #' @rdname AFMImageGaussianMixAnalysis-class setGeneric("minGaussianMix",function(object){standardGeneric("minGaussianMix")}) setGeneric(name= "minGaussianMix<-", def= function(AFMImageGaussianMixAnalysis, value) { return(standardGeneric("minGaussianMix<-")) }) #' @rdname AFMImageGaussianMixAnalysis-class #' @aliases minGaussianMix setMethod("minGaussianMix",signature=signature(object='AFMImageGaussianMixAnalysis'), function(object) { return(object@minGaussianMix) } ) setReplaceMethod(f="minGaussianMix", signature(AFMImageGaussianMixAnalysis = "AFMImageGaussianMixAnalysis", value = "numeric"), definition= function(AFMImageGaussianMixAnalysis, value) { AFMImageGaussianMixAnalysis@minGaussianMix <- value return(AFMImageGaussianMixAnalysis) }) #' Method \code{maxGaussianMix} returns a data.table of Gaussian mixes #' @name AFMImageGaussianMixAnalysis-class #' @rdname AFMImageGaussianMixAnalysis-class setGeneric("maxGaussianMix",function(object){standardGeneric("maxGaussianMix")}) setGeneric(name= "maxGaussianMix<-", def= function(AFMImageGaussianMixAnalysis, value) { return(standardGeneric("maxGaussianMix<-")) }) #' @rdname AFMImageGaussianMixAnalysis-class #' @aliases maxGaussianMix setMethod("maxGaussianMix",signature=signature(object='AFMImageGaussianMixAnalysis'), function(object) { return(object@maxGaussianMix) } ) setReplaceMethod(f="maxGaussianMix", signature(AFMImageGaussianMixAnalysis = "AFMImageGaussianMixAnalysis", value = "numeric"), definition= function(AFMImageGaussianMixAnalysis, value) { AFMImageGaussianMixAnalysis@maxGaussianMix <- value return(AFMImageGaussianMixAnalysis) }) #' Method \code{epsilonGaussianMix} returns a data.table of Gaussian mixes #' @name AFMImageGaussianMixAnalysis-class #' @rdname AFMImageGaussianMixAnalysis-class setGeneric("epsilonGaussianMix",function(object){standardGeneric("epsilonGaussianMix")}) setGeneric(name= "epsilonGaussianMix<-", def= function(AFMImageGaussianMixAnalysis, value) { return(standardGeneric("epsilonGaussianMix<-")) }) #' @rdname AFMImageGaussianMixAnalysis-class #' @aliases epsilonGaussianMix setMethod("epsilonGaussianMix",signature=signature(object='AFMImageGaussianMixAnalysis'), function(object) { return(object@epsilonGaussianMix) } ) setReplaceMethod(f="epsilonGaussianMix", signature(AFMImageGaussianMixAnalysis = "AFMImageGaussianMixAnalysis", value = "numeric"), definition= function(AFMImageGaussianMixAnalysis, value) { AFMImageGaussianMixAnalysis@epsilonGaussianMix <- value return(AFMImageGaussianMixAnalysis) }) #' Perform the calculation for the Gaussian mixes #' #' \code{\link{performGaussianMixCalculation}} perform all the calculation for PSD exploitation #' @param AFMImageGaussianMixAnalysis an \code{\link{AFMImageGaussianMixAnalysis}} to manage and store the results of PSD analysis #' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy #' @author M.Beauvais #' @export #' @examples #' \dontrun{ #' library(AFM) #' #' data(AFMImageCollagenNetwork) #' #' AFMImage<-AFMImageCollagenNetwork #' AFMImage@fullfilename<-"/Users/one/AFMImageCollagenNetwork.txt" #' gMixAnalysis<-AFMImageGaussianMixAnalysis() #' # from two components #' gMixAnalysis@minGaussianMix<-2 #' # to four components #' gMixAnalysis@maxGaussianMix<-4 #' # convergence criteria #' 
gMixAnalysis@epsilonGaussianMix<-1e-4 #' # Create a closure to update progress #' gMixAnalysis@updateProgress<- function(value = NULL, detail = NULL, message = NULL) { #' if (exists("progressGaussianMix")){ #' if (!is.null(message)) { #' progressGaussianMix$set(message = message, value = 0) #' }else{ #' progressGaussianMix$set(value = value, detail = detail) #' } #' } #' } #' gMixAnalysis<-performGaussianMixCalculation(AFMImageGaussianMixAnalysis= gMixAnalysis, AFMImage) #' print("done performGaussianMixCalculation") #' } performGaussianMixCalculation<-function(AFMImageGaussianMixAnalysis, AFMImage) { number_of_components<-ecdf<-density<-NULL # if (is.function(AFMImagePSDAnalysis@updateProgress)) { # AFMImagePSDAnalysis@updateProgress(message="1/3 - Calculating PSD2D", value=0) # } # if (is.function(AFMImageGaussianMixAnalysis@updateProgress)&& # !is.null(AFMImageGaussianMixAnalysis@updateProgress())) { # AFMImageGaussianMixAnalysis@updateProgress(message="Calculating Gaussian Mixes", value=0) # } if (is.function(AFMImageGaussianMixAnalysis@updateProgress)) { AFMImageGaussianMixAnalysis@updateProgress(message="Calculating Gaussian Mixes", value=0) } #data(AFMImageCollagenNetwork) #AFMImage<-AFMImageCollagenNetwork # parameters min<-AFMImageGaussianMixAnalysis@minGaussianMix max<-AFMImageGaussianMixAnalysis@maxGaussianMix mepsilon<-AFMImageGaussianMixAnalysis@epsilonGaussianMix gaussianMixList = array(list(), max) min_height<- 0 max_height<- 3000 heights<-AFMImage@data$h heights<-heights[heights<(max_height/10)] # allH<-data.table(h=heights) # g<-ggplot(allH, aes(h)) + geom_histogram(binwidth = 0.1) # print(g) mixtureCounter<-0 mixtureNumberOfComponents<-min for(mixtureNumberOfComponents in seq(min,max)){ if (is.function(AFMImageGaussianMixAnalysis@updateProgress)) { mixtureCounter<-mixtureCounter+1 AFMImageGaussianMixAnalysis@updateProgress(message=paste("Calculating Gaussian Mixes", mixtureCounter ,"/",(as.numeric(max)-as.numeric(min)+1)) , value=0) #detail = paste0((as.numeric(max)-as.numeric(min)+1),"/",(as.numeric(max))), } heights.k<- mixtools::normalmixEM(heights, k=mixtureNumberOfComponents, arbmean = TRUE, ECM=TRUE, verb=TRUE, maxit=10000, epsilon=mepsilon) #heights.k gaussianMixList[[mixtureNumberOfComponents]]<-heights.k } AFMImageGaussianMixAnalysis@gaussianMix<-gaussianMixList # creating the summary res=data.table(number_of_components=c(0), #component=c(0), mean=c(0), sd=c(0), lambda=c(0)) totalNbOfMixtures<-length(AFMImageGaussianMixAnalysis@gaussianMix) #totalNbOfMixtures<-length(gMixAnalysis@gaussianMix) for (mixtureNumberOfComponents in seq(AFMImageGaussianMixAnalysis@minGaussianMix,totalNbOfMixtures)) { #for (mixtureNumberOfComponents in seq(gMixAnalysis@minGaussianMix,totalNbOfMixtures)) { if (!is.null(AFMImageGaussianMixAnalysis@gaussianMix[mixtureNumberOfComponents][[1]])) { mixture<-AFMImageGaussianMixAnalysis@gaussianMix[mixtureNumberOfComponents][[1]] #mixture<-gMixAnalysis@gaussianMix[mixtureNumberOfComponents][[1]] for(component.number in seq(1, mixtureNumberOfComponents)) { if (length(mixture)>0) { mean=mixture$mu[component.number] sd=mixture$sigma[component.number] lambda=mixture$lambda[component.number] res=rbind(res, data.table(number_of_components=mixtureNumberOfComponents, #component=component.number, mean=mean, sd=sd, lambda=lambda)) } } } } res<-res[-1,] res<-res[order(number_of_components, mean)] res AFMImageGaussianMixAnalysis@summaryMixture<-res # creating points to draw curves AFMImageGaussianMixAnalysis@tcdfsEcdfsCheck<-array(list(), max) 
AFMImageGaussianMixAnalysis@densityCurvesAllHeights<-array(list(), max) AFMImageGaussianMixAnalysis@eachComponentsCounts<-array(list(), max) heights<-AFMImage@data$h distinct.heights <- sort(unique(heights)) totalNbOfMixtures<-length(AFMImageGaussianMixAnalysis@gaussianMix) - length(AFMImageGaussianMixAnalysis@gaussianMix[sapply(AFMImageGaussianMixAnalysis@gaussianMix, is.null)]) print(totalNbOfMixtures) for (mixtureNumberOfComponents in seq(AFMImageGaussianMixAnalysis@minGaussianMix,length(AFMImageGaussianMixAnalysis@gaussianMix))) { baseSheetName<-paste0(mixtureNumberOfComponents,"-components-") print(paste("mixtureNumberOfComponents= ",mixtureNumberOfComponents)) if (!is.null(AFMImageGaussianMixAnalysis@gaussianMix[mixtureNumberOfComponents][[1]])) { #if (!is.null(gMixAnalysis@gaussianMix[mixtureNumberOfComponents][[1]])) { heights.k<-AFMImageGaussianMixAnalysis@gaussianMix[mixtureNumberOfComponents][[1]] #heights.k<-gMixAnalysis@gaussianMix[mixtureNumberOfComponents][[1]] tcdfs <- pnormmix(distinct.heights,mixture=heights.k) ecdfs <- ecdf(heights)(distinct.heights) TheExpDT<-data.table(tcdfs,ecdfs) AFMImageGaussianMixAnalysis@tcdfsEcdfsCheck[[mixtureNumberOfComponents]]<-TheExpDT densityCurves<-data.frame(x=density(heights)$x , y=density(heights)$y, style=rep("Kernel", length(density(heights)$y))) x <- seq(min(density(heights)$x),max(density(heights)$x),length=1000) densityCurves2<-data.frame(x=x, y=dnormalmix(x,heights.k), style=rep("Mixture", length(dnormalmix(x,heights.k)))) allHeights<-rbind(densityCurves,densityCurves2) AFMImageGaussianMixAnalysis@densityCurvesAllHeights[[mixtureNumberOfComponents]]<-allHeights allComponents<-data.table(heights=c(0),counts=c(0), component.number=c(0)) for(component.number in seq(1, mixtureNumberOfComponents)) { tlength=1000 x <- seq(min(density(heights)$x),max(density(heights)$x),length=tlength) y <- dnorm(x,mean=(heights.k$mu[component.number]), sd=heights.k$sigma[component.number])*length(heights)*heights.k$lambda[component.number] allComponents<-rbind(allComponents, data.table(heights=x,counts=y, component.number=rep(component.number,tlength))) } allComponents<-allComponents[-1,] AFMImageGaussianMixAnalysis@eachComponentsCounts[[mixtureNumberOfComponents]]<-allComponents } } #print(gaussianMixList) return(AFMImageGaussianMixAnalysis) } getGaussianMix<-function(exportDirectory, sampleName) { exportCsvFilename<-paste(sampleName,"-gaussian-mix.png", sep="") exportCsvFullFilename<-paste(exportDirectory, exportCsvFilename, sep="/") return(exportCsvFullFilename) } #' pnormmix distribution of a mixture of normals #' #' @param q a vector of quantiles #' @param mixture a gaussian mixture #' @export pnormmix <- function(q,mixture) { lambda <- mixture$lambda k <- length(lambda) pnorm.from.mix <- function(q,component) { lambda[component]*pnorm(q,mean=mixture$mu[component], sd=mixture$sigma[component]) } pnorms <- sapply(1:k,pnorm.from.mix,q=q) return(rowSums(pnorms)) } #' dnormalmix density of a mixture of normals #' #' @param x a vector of quantiles #' @param mixture a gaussian mixture #' @param log perform a log transsformation of the result #' @export dnormalmix <- function(x,mixture,log=FALSE) { lambda <- mixture$lambda k <- length(lambda) like.component <- function(x,component) { lambda[component]*dnorm(x,mean=mixture$mu[component], sd=mixture$sigma[component]) } likes <- sapply(1:k,like.component,x=x) d <- rowSums(likes) if (log) { d <- log(d) } return(d) } #' loglike sum of density of a mixture of normals #' #' @param x a vector of quantiles #' 
@param mixture a gaussian mixture #' @export loglike.normalmix <- function(x,mixture) { loglike <- dnormalmix(x,mixture,log=TRUE) return(sum(loglike)) }
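## ---------------------------------------------------------------------------
## Illustrative sketch (not part of the package): fitting a two-component
## mixture with mixtools and evaluating the helper functions defined above,
## on simulated heights rather than an AFMImage.
# library(mixtools)
# set.seed(4)
# heights <- c(rnorm(500, mean = 0, sd = 1), rnorm(500, mean = 5, sd = 1))
# fit <- normalmixEM(heights, k = 2, arbmean = TRUE, ECM = TRUE,
#                    maxit = 10000, epsilon = 1e-4)
# pnormmix(q = 0, mixture = fit)             # mixture CDF at 0
# dnormalmix(x = 0, mixture = fit)           # mixture density at 0
# loglike.normalmix(heights, mixture = fit)  # log-likelihood of the sample
## ---------------------------------------------------------------------------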
/scratch/gouwar.j/cran-all/cranData/AFM/R/AFMGaussianMixAnalyser.R
require("data.table") require("fftwtools") require("pracma") require("gstat") require("sp") require("stringr") #require(reshape2) library("dbscan") if(getRversion() >= "3.1.0") utils::suppressForeignCheck(c("x", "y")) #' AFM image class #' #' A S4 class to store and manipulate images from Atomic Force Microscopes. #' #' @slot data ($x,$y,$h): a data.table storing the coordinates of the sample and the measured heights #' @slot samplesperline number of samples per line (e.g.: 512) #' @slot lines number of line (e.g.: 512) #' @slot hscansize horizontal size of scan usualy in nanometer (e.g.: hscansize=1000 for a scan size of 1000 nm) #' @slot vscansize vertical size of scan usualy in nanometer (e.g.: vscansize=1000 for a scan size of 1000 nm) #' @slot scansize if hscansize equals vscansize, scansize is the size of scan usualy in nanometer (e.g.: scansize=1000 for a scan size of 1000 nm) #' @slot fullfilename directory and filename on the disk (e.g.: /users/ubuntu/flatten-image.txt) #' @author M.Beauvais #' @examples #' \dontrun{ #' library(AFM) #' library(data.table) #' #' # create a 128 pixels by 128 pixels AFM image #' Lines=128 #' Samplesperline=128 #' fullfilename="RandomFakeAFMImage" #' # the size of scan is 128 nm #' ScanSize=128 #' # the heights is a normal distribution in nanometers #' nm<-c(rnorm(128*128, mean=0, sd=1 )) #' #' scanby<-ScanSize/Samplesperline #' endScan<-ScanSize*(1-1/Samplesperline) #' RandomFakeAFMImage<-AFMImage( #' data = data.table(x = rep(seq(0,endScan, by= scanby), times = Lines), #' y = rep(seq(0,endScan, by= scanby), each = Samplesperline), #' h = nm), #' samplesperline = Samplesperline, lines = Lines, #' vscansize = ScanSize, hscansize = ScanSize, scansize = ScanSize, #' fullfilename = fullfilename ) #' } #' @name AFMImage-class #' @rdname AFMImage-class #' @exportClass AFMImage AFMImage<-setClass("AFMImage", slots = c(data="data.table", samplesperline="numeric", lines="numeric", hscansize="numeric", vscansize="numeric", scansize="numeric", fullfilename="character" )) #' Constructor method of AFMImage Class. 
#' #' @param .Object an AFMImage object #' @param data ($x,$y,$h): a data.table storing the coordinates of the sample and the measured heights #' @param samplesperline number of samples per line (e.g.: 512) #' @param lines number of line (e.g.: 512) #' @param hscansize horizontal size of scan usualy in nanometer (e.g.: hscansize=1000 for a scan size of 1000 nm) #' @param vscansize vertical size of scan usualy in nanometer (e.g.: vscansize=1000 for a scan size of 1000 nm) #' @param scansize if hscansize equals vscansize, scansize is the size of scan usualy in nanometer (e.g.: scansize=1000 for a scan size of 1000 nm) #' @param fullfilename directory and filename on the disk (e.g.: /users/ubuntu/flatten-image.txt) #' @rdname AFMImage-class #' @export setMethod(f="initialize", signature="AFMImage", definition= function(.Object, data, samplesperline, lines, hscansize, vscansize, scansize, fullfilename) { if (!missing(data)) .Object@data<-data if (!missing(samplesperline)) .Object@samplesperline<-samplesperline if (!missing(lines)) .Object@lines<-lines if (!missing(hscansize)) .Object@hscansize<-hscansize if (!missing(vscansize)) .Object@vscansize <-vscansize if (!missing(scansize)) .Object@scansize <-scansize if (!missing(fullfilename)) .Object@fullfilename<-fullfilename validObject(.Object) return(.Object) }) #' Wrapper function AFMImage #' #' @rdname AFMImage-class #' @export AFMImage <- function(data, samplesperline, lines, hscansize, vscansize, scansize, fullfilename) { return(new("AFMImage", data, samplesperline, lines, hscansize, vscansize, scansize, fullfilename)) } #' AFM image sample #' #' A real dataset containing an \code{\link{AFMImage}} of an Aluminium interface. #' The image is made of 512*512 samples of a 1000 nm * 1000 nm surface. #' samplesperline=512 #' lines=512 #' hscansize=1000 #' vscansize=1000 #' #' @name AFMImageOfAluminiumInterface #' @author J.Landoulsi, I.Liascukiene NULL #' AFM image sample #' #' A fake dataset containing a manually generated \code{\link{AFMImage}} (peaks regularly positioned on the surface). #' The image is made of 128*128 samples of a 128 nm * 128 nm surface. #' samplesperline= 128 #' lines= 128 #' hscansize= 128 #' vscansize= 128 #' #' @name AFMImageOfRegularPeaks NULL #' AFM image sample #' #' A fake dataset containing a manually generated \code{\link{AFMImage}} (one peak positioned on the surface). #' The image is made of 128*128 samples of a 128 nm * 128 nm surface. #' samplesperline= 128 #' lines= 128 #' hscansize= 128 #' vscansize= 128 #' #' @name AFMImageOfOnePeak NULL #' AFM image sample #' #' A fake dataset containing a manually generated \code{\link{AFMImage}} (a normal distribution of heights). #' The image is made of 128*128 samples of a 128 nm * 128 nm surface. #' samplesperline= 128 #' lines= 128 #' hscansize= 128 #' vscansize= 128 #' #' @name AFMImageOfNormallyDistributedHeights NULL #' AFM image sample #' #' A real dataset containing an \code{\link{AFMImage}} of a collagen network. #' The image is made of 192*192 samples of a 1500 nm * 1500 nm surface. #' samplesperline=192 #' lines=192 #' hscansize=1500 #' vscansize=1500 #' #' @name AFMImageCollagenNetwork NULL #' Import data from nanoscope analysis(tm) tool #' #' The imported file should contain a header and list of heights #' The header should contain the following fields: #' \itemize{ #' \item Lines: number of scanned lines (e.g. 512) #' \item Sampsline: number of scan per line (e.g. 512) #' \item ScanSize: the sample size (e.g. 
#' }
#'
#' \code{importFromNanoscope} returns an \code{\link{AFMImage}}
#' @param fullfilename a concatenated string of the directory and filename exported with Nanoscope analysis(TM) software
#' @author M.Beauvais
#' @rdname AFMImage-importFromNanoscope
#' @export
#' @examples
#' \dontrun{
#' library(AFM)
#'
#' fullfilename<-"/user/ubuntu/NanoscopeFlattenExportedFile.txt"
#' myAFMimage<-importFromNanoscope(fullfilename)
#' displayIn3D(myAFMimage, width=1024, noLight=TRUE)
#' }
importFromNanoscope<-function(fullfilename){
  filename<-basename(fullfilename)
  # Header fields of a Nanoscope analysis(TM) export, e.g.:
  # "\Version: 0x08100000"
  # "\Samps/line: 512"
  # "\Lines: 512"
  # "\Scan Size: 1000 nm"
  # the header ends just before the "Height(nm)" line
  headerEndString<-"Height(nm)"
  wholeFile <- fread(fullfilename, sep="\t", quote="")
  wholeFile <- unlist(wholeFile)
  headerSizeWhich<-which(wholeFile==headerEndString)+1
  hdrs <- read.table(fullfilename, nrows=headerSizeWhich, skip=2, comment.char="", strip.white= TRUE, check.names=TRUE)
  newhdrs<-sapply(hdrs, function(x) {
    x<-str_replace_all(x, pattern="[^0-9a-zA-Z,.:]+", replacement="")
  })
  unlistedNewHdrs=unlist(strsplit(newhdrs, ":", fixed=TRUE))
  oneSamplesperline =which(tolower(unlistedNewHdrs)== "sampsline")
  Samplesperline = as.numeric(unlistedNewHdrs[oneSamplesperline+1][1])
  oneScanSize =which(tolower(unlistedNewHdrs)== "scansize")
  ScanSize= unlistedNewHdrs[oneScanSize+1][1]
  ScanSize = as.numeric(substr(ScanSize, 1, nchar(ScanSize)-2))
  oneLines =which(tolower(unlistedNewHdrs)== "lines")
  Lines = as.numeric(unlistedNewHdrs[oneLines+1][1])
  print(paste(ScanSize, Samplesperline, Lines))
  nM <- read.table(fullfilename, skip=headerSizeWhich)
  nM <- unlist(nM)
  scanby<-ScanSize/Samplesperline
  endScan<-ScanSize*(1-1/Samplesperline)
  print(paste("imported ", filename, "...", sep=""))
  return(new("AFMImage",
             data = data.table(x = rep(seq(0,endScan, by= scanby), times = Lines),
                               y = rep(seq(0,endScan, by= scanby), each = Samplesperline),
                               h = nM),
             samplesperline = Samplesperline,
             lines = Lines,
             hscansize = ScanSize,
             vscansize = ScanSize,
             scansize = ScanSize,
             fullfilename = fullfilename))
}

getAFMImageFromMatrix<-function(binaryAFMImage, aMatrix) {
  Lines<-binaryAFMImage@lines
  Samplesperline<-binaryAFMImage@samplesperline
  ScanSize<-binaryAFMImage@scansize
  scanby<-binaryAFMImage@scansize/binaryAFMImage@samplesperline
  endScan<-binaryAFMImage@scansize*(1-1/binaryAFMImage@samplesperline)
  fullfilename="circlesMatrixImage"
  circlesMatrixAFMImage<-AFMImage(
    data = data.table(x = rep(seq(0,endScan, by= scanby), times = Lines),
                      y = rep(seq(0,endScan, by= scanby), each = Samplesperline),
                      h = as.vector(aMatrix)),
    samplesperline = Samplesperline, lines = Lines,
    vscansize = ScanSize, hscansize = ScanSize, scansize = ScanSize,
    fullfilename = fullfilename )
  return(circlesMatrixAFMImage)
}

#' Save an AFM image on disk.
#'
#' The function saves an \code{\link{AFMImage}} as an rdata file. It uses the fullfilename param of the \code{\link{AFMImage}} and adds the "AFMImage.rda" extension to save the rdata file on disk.
#'
#' \code{saveOnDisk} saves an \code{\link{AFMImage}} on disk as an rdata file
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @param exportDirectory an optional argument to change the directory where the rdata file will be stored on disk
#' @author M.Beauvais
#' @rdname AFMImage-saveOnDisk
#' @export
#' @examples
#' \dontrun{
#' library(AFM)
#'
#' data(AFMImageOfAluminiumInterface)
#' # save the rdata file of the AFMImage in the tempdir() directory;
#' # select another directory to save it permanently on your hard drive
#' saveOnDisk(AFMImageOfAluminiumInterface, tempdir())
#' }
saveOnDisk<-function(AFMImage, exportDirectory){
  if (missing(exportDirectory)) {
    exportDirectory=dirname(AFMImage@fullfilename)
  }
  fullfilename<-paste(exportDirectory, paste(basename(AFMImage@fullfilename),"AFMImage.rda",sep="-"), sep="/")
  save(AFMImage, file=fullfilename)
}

#' Get a sample of an AFM image.
#'
#' Random selection of heights to keep in an \code{\link{AFMImage}}.
#' This function can be used to quickly calculate an approximate variogram of a large image.
#'
#' \code{sampleAFMImage} returns a sample of the AFMImage to boost the calculation time of the variogram
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @param percentage percentage of heights to keep
#' @return a sample of an \code{\link{AFMImage}}
#' @author M.Beauvais
#' @rdname AFMImage-sampleAFMImage
#' @export
#' @examples
#' \dontrun{
#' library(AFM)
#' library(ggplot2)
#'
#' data(AFMImageOfAluminiumInterface)
#' anAFMImageSample<-sampleAFMImage(AFMImageOfAluminiumInterface,15)
#' variogramAnalysis<-AFMImageVariogramAnalysis(sampleFitPercentage=3.43)
#' avario<-AFM::calculateOmnidirectionalVariogram(AFMImage= anAFMImageSample,
#'                                                AFMImageVariogramAnalysis= variogramAnalysis)
#' dist<-gamma<-NULL
#' p1 <- ggplot(avario, aes(x=dist, y=gamma))
#' p1 <- p1 + geom_point()
#' p1 <- p1 + geom_line()
#' p1 <- p1 + ylab("semivariance")
#' p1 <- p1 + xlab("distance (nm)")
#' p1 <- p1 + ggtitle("Approximation of variogram thanks to sampling")
#' p1
#' }
#'
sampleAFMImage<-function(AFMImage, percentage) {
  totalSize<-nrow(AFMImage@data)
  sampleSize<-floor(totalSize*percentage/100)
  sampleAFMImage<-AFMImage
  sampleAFMImage@data<-AFMImage@data[sample(1:totalSize, size=sampleSize, replace=FALSE), ]
  sampleAFMImage@fullfilename<-paste(sampleAFMImage@fullfilename, "-sample.txt", sep="")
  sampleAFMImage
}

#' Extract a portion of an AFM image.
#'
#' The extract will be a square of the specified size.
#' If the size is too large for the original \code{\link{AFMImage}}, only the biggest valid size will be kept.
#'
#' \code{extractAFMImage} returns an extract of the AFMImage
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @param cornerX horizontal coordinates of the extract
#' @param cornerY vertical coordinates of the extract
#' @param size square size of the extract in number of pixels
#' @return a new \code{\link{AFMImage}} sample
#' @author M.Beauvais
#' @export
#' @rdname AFMImage-extractAFMImage
#' @examples
#' \dontrun{
#' data(AFMImageOfAluminiumInterface)
#' anAFMImageExtract<-extractAFMImage(AFMImageOfAluminiumInterface,15,15,256)
#' }
#'
extractAFMImage<-function(AFMImage, cornerX, cornerY, size) {
  size2<-size
  size<-size-1
  cornerX<-cornerX*AFMImage@hscansize/AFMImage@samplesperline
  cornerY<-cornerY*AFMImage@vscansize/AFMImage@lines
  size=size*AFMImage@hscansize/AFMImage@samplesperline
  minX<-cornerX
  maxX<-cornerX+size
  minY<-cornerY
  maxY<-cornerY+size
  print(paste(cornerX,cornerY,size,minX, maxX, minY,maxY))
  alldata<-copy(AFMImage@data)
  keycols = c("y","x")
  setkeyv(alldata,keycols)
  x<-y<-NULL
  alldata<-alldata[x>=minX & x<=maxX & y>=minY & y<=maxY]
  alldata$x<-alldata$x-min(alldata$x)
  alldata$y<-alldata$y-min(alldata$y)
  hscansize<-AFMImage@hscansize/(AFMImage@samplesperline/size2)
  vscansize<-AFMImage@vscansize/(AFMImage@lines/size2)
  scansize<-max(vscansize, hscansize)
  samplesperline<-length(unique(alldata$y))
  lines<-length(unique(alldata$x))
  fullfilename<-paste(AFMImage@fullfilename, "extract.txt", sep="-")
  newAFMImage=new("AFMImage",
                  data=alldata,
                  vscansize=vscansize,
                  hscansize=hscansize,
                  scansize=scansize,
                  samplesperline=samplesperline,
                  lines=lines,
                  fullfilename=fullfilename )
  return(newAFMImage)
}

#' simplify an AFM image.
#'
#' The simplification takes a simple gridded sample of the image.
#' It can be useful to speed up display.
#'
#' \code{simplifyAFMImage} returns a simplified AFMImage
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @param newSamplesperline the new number of samplesperline of the AFMImage
#' @param newLines the new number of lines of the AFMImage
#' @return a new simplified \code{\link{AFMImage}}
#' @author M.Beauvais
#' @export
#' @rdname AFMImage-simplifyAFMImage
#' @examples
#' \dontrun{
#' data(AFMImageOfAluminiumInterface)
#' anAFMImageExtract<-simplifyAFMImage(AFMImageOfAluminiumInterface,16,16)
#' }
#'
simplifyAFMImage<-function(AFMImage, newSamplesperline, newLines) {
  print(paste("simplifyAFMImage", newSamplesperline, newLines))
  if (newSamplesperline> AFMImage@samplesperline) newSamplesperline<-AFMImage@samplesperline
  if (newLines> AFMImage@lines) newLines<-AFMImage@lines
  z<-matrix(AFMImage@data$h,nrow = AFMImage@lines,ncol = AFMImage@samplesperline)
  samplesperlineBy=ceil(AFMImage@samplesperline/newSamplesperline)
  samplesperlineIndices=seq(1,AFMImage@samplesperline, by=samplesperlineBy)
  linesBy=ceil(AFMImage@lines/newLines)
  linesIndices=seq(1,AFMImage@lines, by=linesBy)
  newZ=z[samplesperlineIndices,linesIndices]
  scanby<-AFMImage@scansize/newSamplesperline
  endScan<-AFMImage@scansize*(1-1/newSamplesperline)
  newData = data.table(x = rep(seq(0,endScan, by= scanby), times = newLines),
                       y = rep(seq(0,endScan, by= scanby), each = newSamplesperline),
                       h = as.numeric(newZ))
  newAFMImage=new("AFMImage",
                  data=newData,
                  vscansize=AFMImage@vscansize,
                  hscansize=AFMImage@hscansize,
                  scansize=AFMImage@scansize,
                  samplesperline=newSamplesperline,
                  lines=newLines,
                  fullfilename=AFMImage@fullfilename)
  return(newAFMImage)
}

#' multiply the heights of an AFMImage
#'
#' \code{multiplyHeightsAFMImage} returns an AFMImage with all the heights multiplied by the multiplier
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @param multiplier the number to multiply the heights with
#' @return an \code{\link{AFMImage}}
#' @author M.Beauvais
#' @export
#' @rdname AFMImage-multiplyHeightsAFMImage
#' @examples
#' \dontrun{
#' data(AFMImageOfAluminiumInterface)
#' newAFMImage<-multiplyHeightsAFMImage(AFMImageOfAluminiumInterface,10)
#' displayIn3D(newAFMImage,noLight=TRUE)
#' }
#'
multiplyHeightsAFMImage<-function(AFMImage, multiplier) {
  newAFMImage<-copy(AFMImage)
  heights<-newAFMImage@data$h*multiplier
  newAFMImage@data$h<-heights
  return(newAFMImage)
}

#' filter the heights of an AFMImage with a minimum and a maximum value
#'
#' \code{filterAFMImage} returns a filtered AFMImage
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @param Min the minimum height value to keep
#' @param Max the maximum height value to keep
#' @return an \code{\link{AFMImage}}
#' @author M.Beauvais
#' @export
#' @rdname AFMImage-filterAFMImage
#'
filterAFMImage<-function(AFMImage, Min, Max) {
  newAFMImage<-copy(AFMImage)
  heights<-newAFMImage@data$h
  heights<-heights+abs(min(heights))
  heights[heights<Min]<-0
  heights[heights>Max]<-0
  newAFMImage@data$h<-heights
  return(newAFMImage)
}

#' make a binary AFMImage by setting all non-zero heights to 1.
#'
#' \code{makeBinaryAFMImage} returns a binary AFMImage
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @return an \code{\link{AFMImage}}
#' @author M.Beauvais
#' @export
#' @rdname AFMImage-makeBinaryAFMImage
#'
makeBinaryAFMImage<-function(AFMImage) {
  newAFMImage<-copy(AFMImage)
  heights<-newAFMImage@data$h
  heights[heights!=0]<-1
  newAFMImage@data$h<-heights
  return(newAFMImage)
}

#' invert a binary AFMImage
#'
#' \code{invertBinaryAFMImage} returns a binary AFMImage with the 0 and 1 heights swapped
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @return an \code{\link{AFMImage}}
#' @author M.Beauvais
#' @export
#' @rdname AFMImage-invertBinaryAFMImage
#'
#' @examples
#' \dontrun{
#' library(AFM)
#' data(AFMImageOfAluminiumInterface)
#' newAFMImage<-copy(AFMImageOfAluminiumInterface)
#' displayIn3D(newAFMImage,noLight=TRUE)
#' newAFMImage<-multiplyHeightsAFMImage(newAFMImage, multiplier=2)
#' displayIn3D(newAFMImage,noLight=TRUE)
#' newAFMImage<-filterAFMImage(newAFMImage, Min=140, Max=300)
#' displayIn3D(newAFMImage,noLight=TRUE)
#' newAFMImage<-makeBinaryAFMImage(newAFMImage)
#' displayIn3D(newAFMImage,noLight=TRUE)
#' newAFMImage<-invertBinaryAFMImage(newAFMImage)
#' displayIn3D(newAFMImage,noLight=TRUE)
#' }
invertBinaryAFMImage<-function(AFMImage){
  # check that the image is binary
  if (all(AFMImage@data$h %in% c(0,1))) {
    mm<-matrix(AFMImage@data$h, ncol =AFMImage@samplesperline)
    mm[mm == 0] <- 2
    mm[mm == 1] <- 0
    mm[mm == 2] <- 1
    invertedBinaryAFMImage<-copy(AFMImage)
    invertedBinaryAFMImage@data$h<-as.vector(mm)
    return(invertedBinaryAFMImage)
  }else{
    stop("AFMImage is not a binary AFMImage")
  }
}

#' calculate statistics about holes in a binary image
#'
#' \code{getHolesStatistics} returns a data.table locating the holes of a binary \code{\link{AFMImage}}
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @return a data.table with the (x, y) coordinates of the points belonging to holes and the cluster (hole) number of each point
#' @author M.Beauvais
#' @export
#' @rdname AFMImage-getHolesStatistics
#' @examples
#' \dontrun{
#' library(AFM)
#'
#' data(AFMImageOfAluminiumInterface)
#' newAFMImage<-copy(AFMImageOfAluminiumInterface)
#' displayIn3D(newAFMImage,noLight=TRUE)
#' newAFMImage<-multiplyHeightsAFMImage(newAFMImage, multiplier=2)
#' displayIn3D(newAFMImage,noLight=TRUE)
#' newAFMImage<-filterAFMImage(newAFMImage, Min=140, Max=300)
#' displayIn3D(newAFMImage,noLight=TRUE)
#' newAFMImage<-makeBinaryAFMImage(newAFMImage)
#' displayIn3D(newAFMImage,noLight=TRUE)
#'
#' holesStats<-getHolesStatistics(newAFMImage)
#' print(holesStats)
#' }
getHolesStatistics<-function(AFMImage) {
  if (isBinary(AFMImage)) {
    invertBinaryAFMImage<-invertBinaryAFMImage(AFMImage)
    mm<-matrix(invertBinaryAFMImage@data$h, ncol = invertBinaryAFMImage@samplesperline)
    res<-which(mm!=0,arr.ind = T)
    islandsDT<-data.table(y=res[,1], x=res[,2])
    rm(res)
    # cluster the points of the inverted image; each cluster is a hole
    DBSCAN <- dbscan(islandsDT, eps = 1, minPts = 3, borderPoints=FALSE)
    #plot(islandsDT$x, islandsDT$y, col = DBSCAN$cluster, pch = 20)
    islandsDT$cluster<-DBSCAN$cluster
    return(islandsDT)
  }else{
    stop("AFMImage is not a binary AFMImage")
  }
}

#' has the AFM image only heights of 0 or 1
#'
#' \code{isBinary} returns TRUE if the heights of the AFMImage are only 0 or 1
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @return a boolean
#' @author M.Beauvais
#' @export
#' @rdname AFMImage-isBinary
isBinary<-function(AFMImage) {
  if (all(AFMImage@data$h %in% c(0,1))) {
    return(TRUE)
  }
  return(FALSE)
}
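# Illustrative sketch (added for clarity, not part of the package API):
# summarising the cluster table returned by getHolesStatistics() with
# data.table. The function name summariseHolesSketch and the output
# format are assumptions for the demo; dbscan labels noise points with
# cluster 0, so those are filtered out before counting.
summariseHolesSketch <- function(AFMImage) {
  holesDT <- getHolesStatistics(AFMImage)  # data.table(y, x, cluster)
  cluster <- NULL                          # quiet R CMD check notes
  # one row per hole: the cluster id and the hole size in pixels
  holesDT[cluster != 0, list(sizeInPixels = .N), by = cluster]
}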
# ---- end of AFM/R/AFMImage.R ----
require("fftwtools") require("pracma") require("data.table") require("gstat") require(sp) require("stringr") # normality tests require(gridExtra) require(moments) require(ggplot2) #require(reshape2) # for reporting require(png) require(grid) if(getRversion() >= "3.1.0") utils::suppressForeignCheck(c("h", "..density..")) #' AFM image analyser class #' #' A S4 class to handle the analysis of one AFM Image. #' #' @slot AFMImage \code{\link{AFMImage}} to be analysed #' @slot variogramAnalysis \code{\link{AFMImageVariogramAnalysis}} #' @slot psdAnalysis \code{\link{AFMImagePSDAnalysis}} #' @slot fdAnalysis \code{\link{AFMImageFractalDimensionsAnalysis}} #' @slot gaussianMixAnalysis \code{\link{AFMImageGaussianMixAnalysis}} #' @slot networksAnalysis \code{\link{AFMImageNetworksAnalysis}} #' @slot mean the mean of heights of the \code{\link{AFMImage}} #' @slot variance the variance of heights of the \code{\link{AFMImage}} #' @slot TotalRrms the total Root Mean Square Roughness of the \code{\link{AFMImage}} calculated from variance #' @slot Ra mean roughness or mean of absolute values of heights #' @slot fullfilename to be removed ? #' @slot updateProgress a function to update a graphical user interface #' @name AFMImageAnalyser-class #' @rdname AFMImageAnalyser-class #' @exportClass AFMImageAnalyser #' @author M.Beauvais #' @include AFMVariogramAnalyser.R AFMPSDAnalyser.R AFMFractalDimensionAnalyser.R AFM3DPrinter.R AFMNetworksAnalyser.R AFM3DPrinter.R #' AFMImageAnalyser<-setClass("AFMImageAnalyser", slots = c( versions="data.table", AFMImage="AFMImage", variogramAnalysis="AFMImageVariogramAnalysis", psdAnalysis="AFMImagePSDAnalysis", fdAnalysis="AFMImageFractalDimensionsAnalysis", gaussianMixAnalysis="AFMImageGaussianMixAnalysis", networksAnalysis="AFMImageNetworksAnalysis", threeDimensionAnalysis="AFMImage3DModelAnalysis", mean="numeric", variance="numeric", TotalRrms="numeric", Ra="numeric", fullfilename="character", updateProgress="function")) #' Constructor method of AFMImageAnalyser Class. #' #' @param .Object an AFMImageAnalyser object #' @param AFMImage an \code{AFMImage} #' @param variogramAnalysis \code{\link{AFMImageVariogramAnalysis}} #' @param psdAnalysis \code{\link{AFMImagePSDAnalysis}} #' @param fdAnalysis \code{\link{AFMImageFractalDimensionsAnalysis}} #' @param gaussianMixAnalysis \code{\link{AFMImageGaussianMixAnalysis}} #' @param networksAnalysis \code{\link{AFMImageNetworksAnalysis}} #' @param threeDimensionAnalysis \code{\link{AFMImage3DModelAnalysis}} #' @param mean the mean of heights of the \code{\link{AFMImage}} #' @param variance the variance of heights of the \code{\link{AFMImage}} #' @param TotalRrms the total Root Mean Square Roughness of the \code{\link{AFMImage}} calculated from variance #' @param Ra mean roughness or mean of absolute values of heights #' @param fullfilename to be removed? 
#' @rdname AFMImageAnalyser-class-initialize #' @export setMethod("initialize", "AFMImageAnalyser", function(.Object, AFMImage, variogramAnalysis, psdAnalysis, fdAnalysis, gaussianMixAnalysis, networksAnalysis, threeDimensionAnalysis, mean, variance, TotalRrms, Ra, fullfilename) { if (!missing(AFMImage)) .Object@AFMImage<-AFMImage if (!missing(variogramAnalysis)) .Object@variogramAnalysis<-variogramAnalysis if (!missing(psdAnalysis)) .Object@psdAnalysis<-psdAnalysis if (!missing(fdAnalysis)) .Object@fdAnalysis<-fdAnalysis if (!missing(gaussianMixAnalysis)) .Object@gaussianMixAnalysis<-gaussianMixAnalysis if (!missing(networksAnalysis)) .Object@networksAnalysis<-networksAnalysis if (!missing(threeDimensionAnalysis)) .Object@threeDimensionAnalysis<-threeDimensionAnalysis if (!missing(mean)) .Object@mean<-mean if (!missing(variance)) .Object@variance<-variance if (!missing(TotalRrms)) .Object@TotalRrms<- TotalRrms if (!missing(Ra)) .Object@Ra<-Ra .Object@fullfilename<-fullfilename .Object@versions<-getLibrariesVersions() validObject(.Object) return(.Object) }) #' Wrapper function AFMImageAnalyser #' #' @param AFMImage an \code{AFMImage} #' @rdname AFMImageAnalyser-class #' @export AFMImageAnalyser <- function(AFMImage) { return(new("AFMImageAnalyser", AFMImage= AFMImage, fullfilename= AFMImage@fullfilename)) } #' Analyse an AFMImage #' #' A function to wrap all the analysis of an \code{\link{AFMImage}} #' \itemize{ #' \item variogram analysis including evaluation of basic variogram models with sill and range calculation #' \item power spectrum density analysis including roughness against lengthscale calculation #' \item fractal dimension analysis including fractal dimensions calculation #' \item basic roughness parameters analysis such as mean, variance, Rrms, Ra #' } #' #' @param AFMImageAnalyser a \code{\link{AFMImageAnalyser}} to manage and store image analysis #' @return an \code{\link{AFMImageAnalyser}} containing all the analysis #' @author M.Beauvais #' @export #' @examples #' \dontrun{ #' library(AFM) #' #' data(AFMImageOfAluminiumInterface) #' AFMImage<-extractAFMImage(AFMImageOfAluminiumInterface, 0, 0, 32) #' AFMImageAnalyser<-new("AFMImageAnalyser", AFMImage= AFMImage, fullfilename = AFMImage@@fullfilename) #' AFMImageAnalyser<-analyse(AFMImageAnalyser) #' print(AFMImageAnalyser@@fdAnalysis) #' } analyse<-function(AFMImageAnalyser) { AFMImage<-AFMImageAnalyser@AFMImage # Variogram analysis #TODO sampleFitPercentage<-3.43/100 #sampleFitPercentage<-0.13/100 variogramAnalysis<-AFMImageVariogramAnalysis(sampleFitPercentage) variogramAnalysis@omnidirectionalVariogram<- calculateOmnidirectionalVariogram(AFMImageVariogramAnalysis= variogramAnalysis, AFMImage=AFMImage) variogramAnalysis@directionalVariograms<- calculateDirectionalVariograms(AFMImageVariogramAnalysis= variogramAnalysis, AFMImage=AFMImage) # manage model evaluations AFMImageVariogram<-variogramAnalysis@omnidirectionalVariogram class(AFMImageVariogram)=c("gstatVariogram","data.frame") variogramAnalysis<-evaluateVariogramModels(variogramAnalysis, AFMImage) # PSD analysis psdAnalysis<-AFMImagePSDAnalysis() roughnessAgainstLengthscale(psdAnalysis)<-RoughnessByLengthScale(AFMImage, psdAnalysis) AFMImageAnalyser@psdAnalysis<-psdAnalysis tryCatch({ intersection <- getAutoIntersectionForRoughnessAgainstLengthscale(AFMImageAnalyser, second_slope= FALSE) intersections<-c(intersection) intersection <- getAutoIntersectionForRoughnessAgainstLengthscale(AFMImageAnalyser, second_slope= TRUE) AFMImagePSDSlopesAnalysis<-intersection 
    intersections<-c(intersections,intersection)
    intersections(psdAnalysis)<-intersections
    psdAnalysis@AFMImagePSDSlopesAnalysis<-AFMImagePSDSlopesAnalysis
  }, error = function(e) {print(paste("Impossible to find PSD intersections automatically",e))})

  # fractal dimension analysis
  fdAnalysis<-AFMImageFractalDimensionsAnalysis()
  fractalDimensionMethods(fdAnalysis)<-getFractalDimensions(AFMImage, fdAnalysis)

  # basic roughness parameters
  AFMImageAnalyser@mean=mean(AFMImage@data$h)
  AFMImageAnalyser@variance=var(AFMImage@data$h)
  AFMImageAnalyser@TotalRrms=sqrt(var(AFMImage@data$h))
  AFMImageAnalyser@Ra=mean(abs(AFMImage@data$h))

  AFMImageAnalyser@variogramAnalysis<-variogramAnalysis
  AFMImageAnalyser@psdAnalysis<-psdAnalysis
  AFMImageAnalyser@fdAnalysis<-fdAnalysis

  return(AFMImageAnalyser)
}

#' Export all data from an analysis of an AFM Image as rdata file
#'
#' A function to export all the data from all analysis of an \code{\link{AFMImage}} and put them on disk as an rdata file
#'
#' @param AFMImageAnalyser an \code{\link{AFMImageAnalyser}}
#' @param AFMImage an \code{\link{AFMImage}}
#'
#' @name putAnalysisOnDisk
#' @rdname putAnalysisOnDisk-methods
#' @exportMethod putAnalysisOnDisk
#' @author M.Beauvais
#'
setGeneric(name= "putAnalysisOnDisk",
           def= function(AFMImageAnalyser, AFMImage) {
             return(standardGeneric("putAnalysisOnDisk"))
           })

#' @rdname putAnalysisOnDisk-methods
#' @aliases putAnalysisOnDisk,AFMImageAnalyser-method
setMethod(f="putAnalysisOnDisk", "AFMImageAnalyser",
          definition= function(AFMImageAnalyser, AFMImage) {
            filename<-basename(AFMImage@fullfilename)
            exportDirectory<-paste(dirname(AFMImage@fullfilename), "outputs", sep="/")
            saveAFMImageAnalyser(AFMImageAnalyser, AFMImage, exportDirectory)
            saveOnDisk(AFMImage, exportDirectory) # save AFMImage as rdata file
          })

setGeneric(name= "saveAFMImageAnalyser",
           def= function(AFMImageAnalyser, AFMImage, exportDirectory) {
             return(standardGeneric("saveAFMImageAnalyser"))
           })

setMethod(f="saveAFMImageAnalyser", "AFMImageAnalyser",
          definition= function(AFMImageAnalyser, AFMImage, exportDirectory) {
            filename<-basename(AFMImage@fullfilename)
            exportCsvFilename<-paste(filename,"AFMImageAnalyser.rda", sep="-")
            exportCsvFullFilename<-paste(exportDirectory, exportCsvFilename, sep="/")
            print(paste("saving", basename(exportCsvFullFilename)))
            tryCatch({
              newVariableName=paste(filename,"AFMImageAnalyser.rda", sep="-")
              assign(newVariableName, AFMImageAnalyser)
              save(list=c(newVariableName), file=exportCsvFullFilename)
            }, error = function(e) {print(paste("error",e))})
          })

#' Calculate the total Root Mean Square Roughness (Rrms total)
#'
#' \code{totalRMSRoughness} returns the total RMS roughness calculated from the variance of heights
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @return a numeric as the square root of the variance of heights
#' @author M.Beauvais
#' @export
#' @examples
#' \dontrun{
#' library(AFM)
#'
#' data(AFMImageOfAluminiumInterface)
#' totalRMSRoughness<-totalRMSRoughness(AFMImageOfAluminiumInterface)
#' print(totalRMSRoughness)
#' }
#'
totalRMSRoughness<-function(AFMImage) {
  sqrt(var(AFMImage@data$h))
}

#' Get Roughness parameters
#'
#' Get basic roughness parameters as amplitude parameters:
#' Total root mean square Roughness or Total Rrms or totalRMSRoughness_TotalRrms\cr
#' Mean roughness or Ra or MeanRoughness_Ra
#'
#' \code{getRoughnessParameters} returns a data.table of roughness parameters
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @return a data.table of roughness parameters:
#'
\itemize{ #' \item totalRMSRoughness_TotalRrms the total RMS Roughness as the square root of the variance of heights #' \item MeanRoughness_Ra the average roughness as the mean of absolute value of heights #' } #' @author M.Beauvais #' @name getRoughnessParameters #' @rdname getRoughnessParameters-methods #' @exportMethod getRoughnessParameters #' @examples #' \dontrun{ #' library(AFM) #' #' data(AFMImageOfAluminiumInterface) #' roughnessParameters<-getRoughnessParameters(AFMImageOfAluminiumInterface) #' print(roughnessParameters) #' } #' setGeneric(name= "getRoughnessParameters", def= function(AFMImage) { return(standardGeneric("getRoughnessParameters")) }) #' @rdname getRoughnessParameters-methods #' @aliases getRoughnessParameters,AFMImage-method setMethod(f="getRoughnessParameters", "AFMImage", definition= function(AFMImage) { # simple surface area area=AFMImage@hscansize*AFMImage@vscansize # surface parameters surfaceArea<- surfaceArea(matrix(AFMImage@data$h,nrow = AFMImage@lines,ncol = AFMImage@samplesperline), cellx = AFMImage@hscansize/AFMImage@samplesperline, celly = AFMImage@vscansize/AFMImage@lines, byCell = FALSE) # image(surfaceArea(matrix(AFMImage@data$h,nrow = AFMImage@lines,ncol = AFMImage@samplesperline), # cellx = AFMImage@hscansize/AFMImage@samplesperline, celly = AFMImage@vscansize/AFMImage@lines, # byCell = TRUE)) # amplitude parameters totalRMSRoughness_TotalRrms = sqrt(var(AFMImage@data$h)) MeanRoughness_Ra = mean(abs(AFMImage@data$h)) #MeanRoughnessDepth_RzDIN = #MaxProfileValleyDepth_Rmax= # spacing parameters # hybrid parameters return(data.table(totalRMSRoughness_TotalRrms, MeanRoughness_Ra, area, surfaceArea)) }) #' Check the isotropy of a sample #' #' \code{checkIsotropy} is used to check the isotropy of an \code{\link{AFMImage}}. #' A directional variogram is calculated for various directions. #' If the variogram is very similar for all the directions then the sample is isotropic. 
#'
#' @param AFMImage an \code{\link{AFMImage}} to be analysed
#' @param AFMImageAnalyser an \code{\link{AFMImageAnalyser}} to perform the analysis
#' @return an \code{\link{AFMImageAnalyser}} containing the directional variograms
#' @author M.Beauvais
#' @export
#' @examples
#' \dontrun{
#' library(AFM)
#' library(ggplot2)
#'
#' data(AFMImageOfAluminiumInterface)
#' AFMImage<-extractAFMImage(AFMImageOfAluminiumInterface, 0, 0, 32)
#' AFMImageAnalyser<-new("AFMImageAnalyser", AFMImage= AFMImage, fullfilename = AFMImage@@fullfilename)
#' AFMImageAnalyser<-checkIsotropy(AFMImage,AFMImageAnalyser)
#' varios<-AFMImageAnalyser@@variogramAnalysis@@directionalVariograms
#' p2 <- ggplot(varios, aes(x=dist, y=gamma,
#'                          color= as.factor(dir.hor), shape=as.factor(dir.hor)))
#' p2 <- p2 + expand_limits(y = 0)
#' p2 <- p2 + geom_point()
#' p2 <- p2 + geom_line()
#' p2 <- p2 + ylab("semivariance (nm^2)")
#' p2 <- p2 + xlab("distance (nm)")
#' p2 <- p2 + ggtitle("Directional")
#' p2
#' }
#'
checkIsotropy<-function(AFMImage, AFMImageAnalyser) {
  print("checking isotropy...")
  # Variogram analysis
  sampleFitPercentage<-3.43/100
  variogramAnalysis<-AFMImageVariogramAnalysis(sampleFitPercentage)
  if(!is.null(AFMImageAnalyser@updateProgress)) variogramAnalysis@updateProgress<-AFMImageAnalyser@updateProgress
  variogramAnalysis@directionalVariograms<- calculateDirectionalVariograms(AFMImage=AFMImage, AFMImageVariogramAnalysis=variogramAnalysis)
  AFMImageAnalyser@variogramAnalysis<-variogramAnalysis
  print("done.")
  return(AFMImageAnalyser)
}

#' Visually check the normality of the sample
#'
#' \code{checkNormality} performs a visual check of whether the distribution of heights of an \code{\link{AFMImage}} follows a normal distribution. The function displays Quantile/Quantile and distribution plots.
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @param ... pngfullfilename (optional): directory and filename to save the visual check to png or pdffullfilename (optional): directory and filename to save the visual check to pdf
#'
#' @references Olea2006, Ricardo A. Olea "A six-step practical approach to semivariogram modeling", 2006, "Stochastic Environmental Research and Risk Assessment, Volume 20, Issue 5 , pp 307-318"
#'
#' @author M.Beauvais
#' @export
#' @examples
#' \dontrun{
#' library(AFM)
#'
#' # display Quantile/Quantile and distribution plots.
#' data(AFMImageOfNormallyDistributedHeights)
#' checkNormality(AFMImage= AFMImageOfNormallyDistributedHeights)
#'
#' # display and save on disk Quantile/Quantile and distribution plots.
#' data(AFMImageOfNormallyDistributedHeights) #' checkNormality(AFMImage= AFMImageOfNormallyDistributedHeights, #' pngfullfilename=paste(tempdir(), "checkNormality.png", sep="/")) #' } checkNormality<- function(..., AFMImage) { force(AFMImage) fullfilename<-AFMImage@fullfilename args<-names(list(...)) toPng<-c(match('pngfullfilename',args)!=-1) toPdf<-c(match('pdffullfilename',args)!=-1) qq <- checkNormalityQQ(AFMImage) m <- checkNormalityDensity(AFMImage) toFile<-FALSE if (!is.na(toPng)) { reportName<-paste(fullfilename, "-normality-checks",".png",sep="") print(paste("saving", basename(reportName))) png(reportName, width=1280, height=800, res=200) toFile<-TRUE } if (!is.na(toPdf)) { reportName<-paste(fullfilename, "-normality-checks",".pdf",sep="") print(paste("saving", basename(reportName))) pdf(reportName, width=11.69, height=8.27) toFile<-TRUE } if (toFile) { title<- paste("Normality tests for ",basename(fullfilename)) }else{ title<-"Normality tests" } grid.newpage() # Open a new page on grid device pushViewport(viewport(layout = grid.layout(3, 2, heights = unit(c(0.5, 5, 0.5), "null")))) print(qq, vp = viewport(layout.pos.row = 2, layout.pos.col = 1)) print(m, vp = viewport(layout.pos.row = 2, layout.pos.col = 2)) grid.text(title, vp = viewport(layout.pos.row = 1, layout.pos.col = 1:2)) other<-paste("mean=", mean(AFMImage@data$h), "- skewness=", moments::skewness(AFMImage@data$h)) grid.text(other, vp = viewport(layout.pos.row = 3, layout.pos.col = 1:2)) if (toFile) { dev.off() #print("exist") } } checkNormalityQQ<- function(AFMImage) { h<-NULL ggplot(data=AFMImage@data, mapping=aes(sample=h)) + stat_qq() + geom_abline(intercept = mean(AFMImage@data$h), slope = sd(AFMImage@data$h)) } checkNormalityDensity<- function(AFMImage) { h<-..density..<-NULL ggplot(AFMImage@data, aes(x=h)) + geom_histogram( aes(y=..density..), colour="black", fill="white") + stat_function(fun = dnorm, args = list(mean = mean(AFMImage@data$h), sd = sd(AFMImage@data$h))) } getLibrariesVersions<-function() { v<-data.table(installed.packages()) Package<-NULL AFMPackage<-v[Package=="AFM",] gstatPackage<-v[Package=="gstat",] fractaldimPackage<-v[Package=="fractaldim",] fftwtoolsPackage<-v[Package=="fftwtools",] versions=data.table(lib=c("AFM", "gstat", "fractaldim", "fftwtools"), version=c(AFMPackage[1,]$Version, gstatPackage[1,]$Version, fractaldimPackage[1,]$Version, fftwtoolsPackage[1,]$Version) ) return(versions) }
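# A minimal sanity sketch (illustrative only, not part of the package):
# the amplitude parameters used throughout this file reduce to simple
# moments of the height vector, i.e. Rrms = sqrt(var(h)) and Ra = mean(|h|).
# The synthetic heights below are an assumption for the demo; the block is
# wrapped in if (FALSE) so that sourcing this file has no side effects.
if (FALSE) {
  h <- rnorm(128 * 128, mean = 0, sd = 1.5)  # synthetic heights in nm
  TotalRrms <- sqrt(var(h))                  # total RMS roughness, as in analyse()
  Ra <- mean(abs(h))                         # mean roughness, as in analyse()
  print(c(TotalRrms = TotalRrms, Ra = Ra))
}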
# ---- end of AFM/R/AFMImageAnalyser.R ----
require(igraph)
require(dbscan)
require(data.table)
require(sp)
require(parallel)
require(mixtools)
require(grDevices)

HASHSIZE<-512*512
RADIUS_MULTIPLIER<-2
BIGGER_CIRCLE_RADIUS<-4
BIGGER_CIRCLE_RADIUS_MULTILPLIER<-2
SAMPLE_ON_THIN_PORTIONS<-25 # percents
MAX_DISTANCE<-64
AREA_MIN <-25
CLUSTER_COUNT_MIN <- 45
CIRCLE_RADIUS_INIT <- 25

setOldClass("igraph")

#' AFM image networks analysis class
#'
#' A S4 class to handle the networks calculation
#'
#' @slot vertexHashsize hash to transform coordinates to vertexId
#' @slot binaryAFMImage the AFMImage after transformation before analysis
#' @slot binaryAFMImageWithCircles the AFMImage after transformation with the spotted circles
#' @slot circlesTable a data.table of identified circles
#' @slot edgesTable a data.table of edges
#' @slot fusionedNodesCorrespondance a data.table of correspondence between initial node and fusioned node
#' @slot fusionedNodesEdgesTable a data.table of nodes fusioned because of intersecting
#' @slot isolatedNodesList the list of isolated nodes
#' @slot heightNetworksslider used multiplier of heights to facilitate analysis
#' @slot filterNetworkssliderMin used filter minimum value to facilitate analysis
#' @slot filterNetworkssliderMax used filter maximum value to facilitate analysis
#' @slot smallBranchesTreatment boolean - smallest circle used or not
#' @slot originalGraph a list of \code{\link{igraph}}
#' @slot skeletonGraph a list of \code{\link{igraph}}
#' @slot shortestPaths a data.table of shortest paths
#' @slot networksCharacteristics a data.table to store the skeleton graph characteristics
#' @slot holes a data.table to store the cluster number of each point
#' @slot holesCharacteristics a data.table to summarize the data about holes
#' @slot graphEvcent an array to store Evcent
#' @slot graphBetweenness an array to store the graph betweenness
#' @slot libVersion version of the AFM library used to perform the analysis
#' @slot updateProgress a function to update a graphical user interface
#' @name AFMImageNetworksAnalysis-class
#' @rdname AFMImageNetworksAnalysis-class
#' @exportClass AFMImageNetworksAnalysis
#' @author M.Beauvais
AFMImageNetworksAnalysis<-setClass("AFMImageNetworksAnalysis",
                                   slots = c(
                                     vertexHashsize="numeric",
                                     binaryAFMImage="AFMImage",
                                     binaryAFMImageWithCircles="AFMImage",
                                     circlesTable="data.table",
                                     edgesTable="data.table",
                                     fusionedNodesCorrespondance="data.table",
                                     fusionedNodesEdgesTable="data.table",
                                     isolatedNodesList="numeric",
                                     heightNetworksslider="numeric",
                                     filterNetworkssliderMin="numeric",
                                     filterNetworkssliderMax="numeric",
                                     smallBranchesTreatment="logical",
                                     originalGraph="igraph",
                                     skeletonGraph="igraph",
                                     shortestPaths="data.table",
                                     networksCharacteristics="data.table",
                                     holes="data.table",
                                     holesCharacteristics="data.table",
                                     graphEvcent="numeric",
                                     graphBetweenness="numeric",
                                     libVersion="character",
                                     updateProgress="function"))

#' Constructor method of AFMImageNetworksAnalysis Class.
#'
#' @param .Object an AFMImageNetworksAnalysis Class
#' @param vertexHashsize hash to transform coordinates to vertexId
#' @param binaryAFMImage the AFMImage after transformation before analysis
#' @param binaryAFMImageWithCircles the AFMImage after transformation with the spotted circles
#' @param circlesTable a data.table of identified circles
#' @param edgesTable a data.table of edges
#' @param fusionedNodesCorrespondance a data.table of correspondence between initial node and fusioned node
#' @param fusionedNodesEdgesTable a data.table of nodes fusioned because of intersecting
#' @param isolatedNodesList a list of isolated nodes
#' @param heightNetworksslider used multiplier of heights to facilitate analysis
#' @param filterNetworkssliderMin used filter minimum value to facilitate analysis
#' @param filterNetworkssliderMax used filter maximum value to facilitate analysis
#' @param smallBranchesTreatment boolean - smallest circle used or not
#' @param originalGraph a list of \code{\link{igraph}}
#' @param skeletonGraph a list of \code{\link{igraph}}
#' @param shortestPaths a data.table of shortest paths
#' @param networksCharacteristics a data.table to store the skeleton graph characteristics
#' @param holes a data.table to store the cluster number of each point
#' @param holesCharacteristics a data.table to summarize the data about holes
#' @param graphEvcent an array to store Evcent
#' @param graphBetweenness an array to store the graph betweenness
#' @param libVersion version of the AFM library used to perform the analysis
#' @rdname AFMImageNetworksAnalysis-class
#' @export
setMethod("initialize", "AFMImageNetworksAnalysis", function(.Object,
                                                             vertexHashsize,
                                                             binaryAFMImage, binaryAFMImageWithCircles,
                                                             circlesTable, edgesTable,
                                                             fusionedNodesCorrespondance, fusionedNodesEdgesTable,
                                                             isolatedNodesList,
                                                             heightNetworksslider, filterNetworkssliderMin, filterNetworkssliderMax, smallBranchesTreatment,
                                                             originalGraph, skeletonGraph,
                                                             shortestPaths, networksCharacteristics, holes, holesCharacteristics,
                                                             graphEvcent, graphBetweenness,
                                                             libVersion)
{
  if(!missing(vertexHashsize)) .Object@vertexHashsize<-vertexHashsize
  if(!missing(binaryAFMImage)) .Object@binaryAFMImage<-binaryAFMImage
  if(!missing(binaryAFMImageWithCircles)) .Object@binaryAFMImageWithCircles<-binaryAFMImageWithCircles
  if(!missing(circlesTable)) .Object@circlesTable<-circlesTable
  if(!missing(edgesTable)) .Object@edgesTable<-edgesTable
  if(!missing(fusionedNodesCorrespondance)) .Object@fusionedNodesCorrespondance<-fusionedNodesCorrespondance
  if(!missing(fusionedNodesEdgesTable)) .Object@fusionedNodesEdgesTable<-fusionedNodesEdgesTable
  if(!missing(isolatedNodesList)) .Object@isolatedNodesList<-isolatedNodesList
  if(!missing(originalGraph)) .Object@originalGraph<-originalGraph
  if(!missing(skeletonGraph)) .Object@skeletonGraph<-skeletonGraph
  if(!missing(shortestPaths)) .Object@shortestPaths<-shortestPaths
  if(!missing(networksCharacteristics)) .Object@networksCharacteristics<-networksCharacteristics
  if(!missing(holes)) .Object@holes<-holes
  if(!missing(holesCharacteristics)) .Object@holesCharacteristics<-holesCharacteristics
  if(!missing(graphEvcent)) .Object@graphEvcent<-graphEvcent
  if(!missing(graphBetweenness)) .Object@graphBetweenness<-graphBetweenness
  if(!missing(heightNetworksslider)) .Object@heightNetworksslider<-heightNetworksslider
  if(!missing(filterNetworkssliderMin)) .Object@filterNetworkssliderMin<-filterNetworkssliderMin
  if(!missing(filterNetworkssliderMax)) .Object@filterNetworkssliderMax<-filterNetworkssliderMax
  if(!missing(smallBranchesTreatment))
.Object@smallBranchesTreatment<-smallBranchesTreatment #if(!missing(libVersion)) .Object@libVersion<-as.character(packageVersion("AFM")) #validObject(.Object) return(.Object) }) #' Wrapper function AFMImageNetworksAnalysis #' #' @rdname AFMImageNetworksAnalysis-class #' @export AFMImageNetworksAnalysis <- function() { return(new("AFMImageNetworksAnalysis")) } #' Multiply, filter the heights and make a binary AFMImage from the transformed AFMImage #' #' \code{transformAFMImageForNetworkAnalysis} update \code{\link{AFMImageNetworksAnalysis}} making a binary AFMImage #' #' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy #' @param AFMImageNetworksAnalysis n \code{\link{AFMImageNetworksAnalysis}} to store the results of the transformation #' #' @name transformAFMImageForNetworkAnalysis #' @rdname transformAFMImageForNetworkAnalysis-methods #' @exportMethod transformAFMImageForNetworkAnalysis #' @author M.Beauvais setGeneric(name= "transformAFMImageForNetworkAnalysis", def= function(AFMImageNetworksAnalysis, AFMImage) { return(standardGeneric("transformAFMImageForNetworkAnalysis")) }) #' @rdname transformAFMImageForNetworkAnalysis-methods #' @aliases transformAFMImageForNetworkAnalysis,AFMImage-method setMethod(f="transformAFMImageForNetworkAnalysis", "AFMImageNetworksAnalysis", definition= function(AFMImageNetworksAnalysis, AFMImage) { newAFMImage<-multiplyHeightsAFMImage(AFMImage, multiplier=AFMImageNetworksAnalysis@heightNetworksslider) newAFMImage<-filterAFMImage(newAFMImage, Min=AFMImageNetworksAnalysis@filterNetworkssliderMin, Max=AFMImageNetworksAnalysis@filterNetworkssliderMax) newAFMImage<-makeBinaryAFMImage(newAFMImage) AFMImageNetworksAnalysis@binaryAFMImage<-copy(newAFMImage) AFMImageNetworksAnalysis@vertexHashsize<-newAFMImage@samplesperline*newAFMImage@lines return(AFMImageNetworksAnalysis) }) #' Calculate networks on the surface #' #' \code{calculateNetworks} update \code{\link{AFMImageNetworksAnalysis}} #' #' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy #' @param AFMImageNetworksAnalysis n \code{\link{AFMImageNetworksAnalysis}} to store the results of networks analysis #' #' @name calculateNetworks #' @rdname calculateNetworks-methods #' @exportMethod calculateNetworks #' @author M.Beauvais setGeneric(name= "calculateNetworks", def= function(AFMImageNetworksAnalysis, AFMImage) { return(standardGeneric("calculateNetworks")) }) #' @rdname calculateNetworks-methods #' @aliases calculateNetworks,AFMImage-method setMethod(f="calculateNetworks", "AFMImageNetworksAnalysis", definition= function(AFMImageNetworksAnalysis, AFMImage) { counter<-0 totalLength<-2 if (!is.null(AFMImageNetworksAnalysis@updateProgress)&& is.function(AFMImageNetworksAnalysis@updateProgress)&& !is.null(AFMImageNetworksAnalysis@updateProgress())) { text <- paste0("Creating networks") AFMImageNetworksAnalysis@updateProgress(value= 0, detail = text) counter<-counter+1 value<-counter / totalLength text <- paste0("Creating networks", round(counter, 2),"/",totalLength) AFMImageNetworksAnalysis@updateProgress(value= value, detail = text) print("update") } AFMImageNetworksAnalysis@originalGraph<-calculateIgraph(AFMImageNetworksAnalysis= AFMImageNetworksAnalysis, AFMImage = AFMImage) if (!is.null(AFMImageNetworksAnalysis@updateProgress)&& is.function(AFMImageNetworksAnalysis@updateProgress)&& !is.null(AFMImageNetworksAnalysis@updateProgress())) { text <- paste0("Creating networks skeleton") AFMImageNetworksAnalysis@updateProgress(value= 0, detail = text) counter<-counter+1 
    value<-counter / totalLength
    text <- paste0("Creating networks", round(counter, 2),"/",totalLength)
    AFMImageNetworksAnalysis@updateProgress(value= value, detail = text)
    print("update")
  }
  AFMImageNetworksAnalysis<-calculateNetworkSkeleton(AFMImageNetworksAnalysis= AFMImageNetworksAnalysis, AFMImage = AFMImage)
  return(AFMImageNetworksAnalysis)
})

#' Get Network parameters
#'
#' Get basic network parameters such as the number of nodes, the number of
#' nodes per area or per surface area, and the mean physical distance
#' between nodes.
#'
#' \code{getNetworkParameters} returns a data.table of network parameters
#' @param AFMImageNetworksAnalysis an \code{\link{AFMImageNetworksAnalysis}}
#' @param AFMImage an \code{\link{AFMImage}}
#' @return a data.table of network parameters:
#' \itemize{
#'   \item totalNumberOfNodes the total number of nodes with degree different from 2
#'   \item totalNumberOfNodesWithDegreeTwoOrMorePerArea the total number of nodes with degree strictly above 2 per area
#'   \item totalNumberOfNodesWithDegreeOnePerArea the total number of nodes with degree one per area
#'   \item numberOfNodesPerArea the total number of nodes with degree different from 2 per area
#'   \item numberOfNodesPerSurfaceArea the total number of nodes with degree different from 2 per surface area
#'   \item MeanPhysicalDistanceBetweenNodes the mean physical distance between nodes of degree different from two
#' }
#' @author M.Beauvais
#' @name getNetworkParameters
#' @rdname getNetworkParameters-methods
#' @exportMethod getNetworkParameters
#' @examples
#' \dontrun{
#' library(AFM)
#' library(parallel)
#'
#' data(AFMImageCollagenNetwork)
#' AFMImage<-AFMImageCollagenNetwork
#' AFMIA = new("AFMImageNetworksAnalysis")
#' AFMIA@heightNetworksslider=10
#' AFMIA@filterNetworkssliderMin=150
#' AFMIA@filterNetworkssliderMax=300
#' AFMIA@smallBranchesTreatment=TRUE
#' clExist<-TRUE
#' cl <- makeCluster(2,outfile="")
#' AFMIA<-transformAFMImageForNetworkAnalysis(AFMImageNetworksAnalysis=AFMIA,AFMImage= AFMImage)
#' AFMIA<-identifyNodesAndEdges(cl=cl,AFMImageNetworksAnalysis= AFMIA,maxHeight= 300)
#' AFMIA<-identifyEdgesFromCircles(cl=cl,AFMImageNetworksAnalysis= AFMIA, MAX_DISTANCE = 75)
#' AFMIA<-identifyIsolatedNodes(AFMIA)
#' AFMIA<-createGraph(AFMIA)
#' AFMIA<-calculateShortestPaths(cl=cl, AFMImageNetworksAnalysis=AFMIA)
#' AFMIA<-calculateNetworkParameters(AFMImageNetworksAnalysis=AFMIA, AFMImage=AFMImage)
#' AFMIA<-calculateHolesCharacteristics(AFMImageNetworksAnalysis=AFMIA)
#' stopCluster(cl)
#' }
setGeneric(name= "getNetworkParameters",
           def= function(AFMImageNetworksAnalysis, AFMImage) {
             return(standardGeneric("getNetworkParameters"))
           })

#' @rdname getNetworkParameters-methods
#' @aliases getNetworkParameters,AFMImage-method
setMethod(f="getNetworkParameters", "AFMImageNetworksAnalysis",
          definition= function(AFMImageNetworksAnalysis, AFMImage) {
            node_degree<-NULL
            # get parameters about the image
            param<-getRoughnessParameters(AFMImage)
            # network parameters
            g<-AFMImageNetworksAnalysis@skeletonGraph
            verticesAnalysisDT<-data.table(vid=V(g)$name, node_degree=unname(degree(g)))
            directedConnectedNodesDT<-AFMImageNetworksAnalysis@shortestPaths
            # Total number of nodes (degree different from 2)
            totalNumberOfNodes<-nrow(verticesAnalysisDT[node_degree!=2])
            # Surface
            area<-param$area
            # Number of nodes with degree > 2 per area
            totalNumberOfNodesWithDegreeTwoOrMorePerArea<-nrow(verticesAnalysisDT[node_degree>2])/area
            # Number of nodes with degree = 1 per area
            totalNumberOfNodesWithDegreeOnePerArea<-nrow(verticesAnalysisDT[node_degree==1])/area
            #Surface area of a grid of
heights surfaceArea<-param$surfaceArea #Nodes (degree>2 or =1) / area numberOfNodesPerArea<-(nrow(verticesAnalysisDT[node_degree>2])+nrow(verticesAnalysisDT[node_degree==1]))/area #Nodes (degree>2 or =1) / surface area numberOfNodesPerSurfaceArea<-(nrow(verticesAnalysisDT[node_degree>2])+nrow(verticesAnalysisDT[node_degree==1]))/surfaceArea #Mean physical distance between nodes (degree>2) MeanPhysicalDistanceBetweenNodes<-mean(directedConnectedNodesDT$physicalDistance) resultDT=data.table(totalNumberOfNodes=totalNumberOfNodes, totalNumberOfNodesWithDegreeTwoOrMorePerArea=totalNumberOfNodesWithDegreeTwoOrMorePerArea, totalNumberOfNodesWithDegreeOnePerArea=totalNumberOfNodesWithDegreeOnePerArea, numberOfNodesPerArea=numberOfNodesPerArea, numberOfNodesPerSurfaceArea=numberOfNodesPerSurfaceArea, MeanPhysicalDistanceBetweenNodes=MeanPhysicalDistanceBetweenNodes) return(resultDT) }) #' Get vertex id from x,y coordinates #' #' \code{getVertexId} return the vertexId #' #' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy #' @param x coordinates in x axis #' @param y coordinates in y axis #' @author M.Beauvais #' @export getVertexId<-function(AFMImage,x,y) { if ((x<0)||(x>AFMImage@samplesperline)|| (y<0)||(y>AFMImage@lines)) return(-1) #print(paste("getVertexId",x,y,as.numeric(x+HASHSIZE*y))) #return(as.numeric(x+AFMImage@samplesperline*y)) return(as.numeric(x+HASHSIZE*y)) } #' Calculate Gaussian Mixture with two components from the AFM Image. #' #' \code{calculateGaussianMixture} return a data.table containing the result of the Gaussian Mixture and result of the test #' #' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy #' @author M.Beauvais #' @export #' @examples #' \dontrun{ #' library(AFM) #' data(AFMImageOfNetworks) #' mixtureCharacteristics<-calculateGaussianMixture(AFMImageOfNetworks) #' print(mixtureCharacteristics) #' } calculateGaussianMixture<-function(AFMImage) { k = 2 filename<-AFMImage@fullfilename heights<-AFMImage@data$h*10 heights<-heights+abs(min(heights)) #heights[heights>300]<-300 mixmdl = normalmixEM(heights,k=2, arbmean = TRUE) summary(mixmdl) mixmdl # CDF of mixture of two normals pmnorm <- function(x, mu, sigma, pmix) { pmix[1]*pnorm(x,mu[1],sigma[1]) + (1-pmix[1])*pnorm(x,mu[2],sigma[2]) } test <- ks.test(heights, pmnorm, mu=mixmdl$mu, sigma=mixmdl$sigma, pmix=mixmdl$lambda) print(test) if (mixmdl$mu[1]<mixmdl$mu[2]) { invert=0 min_mu<-mixmdl$mu[1] min_sigma<-mixmdl$sigma[1] min_lambda<-mixmdl$lambda[1] max_mu<-mixmdl$mu[2] max_sigma<-mixmdl$sigma[2] max_lambda<-mixmdl$lambda[2] }else{ invert=1 min_mu<-mixmdl$mu[2] min_sigma<-mixmdl$sigma[2] min_lambda<-mixmdl$lambda[2] max_mu<-mixmdl$mu[1] max_sigma<-mixmdl$sigma[1] max_lambda<-mixmdl$lambda[1] } gaussianMixture<- data.table(filename=filename, invert=invert, min_mu=min_mu, min_sigma=min_sigma, min_lambda=min_lambda, max_mu=max_mu, max_sigma=max_sigma, max_lambda=max_lambda, ks_test_pvalue=test$p.value, ks_test_D=unname(test$statistic)) return(gaussianMixture) } #' Get x,y coordinates from vertex id #' #' \code{getCoordinatesFromVertexId} return a list x,y coordinates #' #' @param vId the vertex id #' @author M.Beauvais #' @export getCoordinatesFromVertexId<-function(vId) { vertexId<-as.numeric(vId) y<-floor(vertexId/HASHSIZE) x<-vertexId-y*HASHSIZE return(data.table(vId=vId, coords.x1=x,coords.x2=y)) } # getCoordinatesFromVertexId<-function(AFMImage, vId) { # # vertexId<-as.numeric(vId) # # y<-floor(vertexId/HASHSIZE) # # x<-vertexId-y*HASHSIZE # # return(c(x,y)) # 
vertexId<-as.numeric(vId) # y<-floor(vertexId/HASHSIZE) # x<-vertexId-y*HASHSIZE # return(data.table(vId=vId, coords.x1=x,coords.x2=y)) # } #' #' @export #' getCoordinatesFromVertexId2<-function(AFMImage, vId) { #' vertexId<-as.numeric(vId) #' y<-floor(vertexId/HASHSIZE) #' x<-vertexId-y*HASHSIZE #' return(data.table(vId=vId, coords.x1=x,coords.x2=y)) #' } #' Get getNetworkGridLayout #' #' \code{getNetworkGridLayout} return a list x,y coordinates #' #' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy #' @param vId the vertex id #' @author M.Beauvais #' @export getNetworkGridLayout<-function(AFMImage, vId) { vertexId<-as.numeric(vId) y<-floor(vertexId/HASHSIZE) x<-vertexId-y*HASHSIZE return(data.table(x=x,y=y)) } #' Does an edge exist ? #' #' \code{existsEdge} return TRUE if an edge exists for this vertex id #' #' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy #' @param vertexId the vertex id #' @author M.Beauvais #' @export existsEdge<-function(AFMImage, vertexId) { # print(vertexId) if ((vertexId<1)||(vertexId>(AFMImage@samplesperline+HASHSIZE*(AFMImage@lines-1)))) { # print("return FALSE") return(FALSE) } # print(vertexId) coordinates<-getCoordinatesFromVertexId(vertexId) # print(coordinnates) id<-coordinates[1]+AFMImage@samplesperline*coordinates[2] # print(id) if (AFMImage@data$h[id]>0) { # print("return TRUE") return(TRUE) } # print("return FALSE") return(FALSE) } #' Get surrounding vertices from x,y coordinates #' #' \code{getSurroundingVertexesList} return the vertexId #' #' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy #' @param x coordinates in x axis #' @param y coordinates in y axis #' @author M.Beauvais #' @export getSurroundingVertexesList<-function(AFMImage,x,y) { # print(x) # print(y) horizontalWeight<-AFMImage@hscansize/AFMImage@samplesperline verticalWeight<-AFMImage@vscansize/AFMImage@lines diagWeight<-sqrt((AFMImage@vscansize/AFMImage@lines)^2+(AFMImage@hscansize/AFMImage@samplesperline)^2) currentVertexId<-getVertexId(AFMImage,x,y) vList=data.table() #x+1 y nearVertexId<-getVertexId(AFMImage,x+1,y) # print(nearVertexId) if (existsEdge(AFMImage, nearVertexId)) vList<-rbind(vList, data.table(from=as.character(currentVertexId), to=as.character(nearVertexId), weight=as.numeric(horizontalWeight))) #x+1 y+1 nearVertexId<-getVertexId(AFMImage,x+1,y+1) #print(existsEdge(AFMImage, nearVertexId)) if (existsEdge(AFMImage, nearVertexId)) vList<-rbind(vList, data.table(from=as.character(currentVertexId), to=as.character(nearVertexId), weight=as.numeric(diagWeight))) #x y+1 nearVertexId<-getVertexId(AFMImage,x,y+1) if (existsEdge(AFMImage, nearVertexId)) vList<-rbind(vList, data.table(from=as.character(currentVertexId), to=as.character(nearVertexId), weight=as.numeric(verticalWeight))) #x-1 y+1 nearVertexId<-getVertexId(AFMImage,x-1,y+1) if (existsEdge(AFMImage, nearVertexId)) vList<-rbind(vList, data.table(from=as.character(currentVertexId), to=as.character(nearVertexId), weight=as.numeric(diagWeight))) #x-1 y nearVertexId<-getVertexId(AFMImage,x-1,y) if (existsEdge(AFMImage, nearVertexId)) vList<-rbind(vList, data.table(from=as.character(currentVertexId), to=as.character(nearVertexId), weight=as.numeric(horizontalWeight))) #x-1 y-1 nearVertexId<-getVertexId(AFMImage,x-1,y-1) if (existsEdge(AFMImage, nearVertexId)) vList<-rbind(vList, data.table(from=as.character(currentVertexId), to=as.character(nearVertexId), weight=as.numeric(diagWeight))) #x y-1 nearVertexId<-getVertexId(AFMImage,x,y-1) if 
(existsEdge(AFMImage, nearVertexId)) vList<-rbind(vList, data.table(from=as.character(currentVertexId), to=as.character(nearVertexId), weight=as.numeric(verticalWeight)))
  #x+1 y-1
  nearVertexId<-getVertexId(AFMImage,x+1,y-1)
  if (existsEdge(AFMImage, nearVertexId)) vList<-rbind(vList, data.table(from=as.character(currentVertexId), to=as.character(nearVertexId), weight=as.numeric(diagWeight)))

  return(vList)
}

#' isAdjacentToBetterVertex
#'
#' \code{isAdjacentToBetterVertex} returns TRUE if the vertex is adjacent to a better vertex
#'
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @param x coordinates in x axis
#' @param y coordinates in y axis
#' @author M.Beauvais
#' @export
isAdjacentToBetterVertex<-function(AFMImage,x,y) {
  currentVertexId<-getVertexId(AFMImage,x,y)
  currentH<-AFMImage@data$h[currentVertexId]
  if(currentH<=0) return(FALSE)
  #x+1 y
  nearVertexId<-getVertexId(AFMImage,x+1,y)
  if ((nearVertexId>0)&(currentH<=AFMImage@data$h[nearVertexId])) return(TRUE)
  #x+1 y+1
  nearVertexId<-getVertexId(AFMImage,x+1,y+1)
  if ((nearVertexId>0)&(currentH<=AFMImage@data$h[nearVertexId])) return(TRUE)
  #x y+1
  nearVertexId<-getVertexId(AFMImage,x,y+1)
  if ((nearVertexId>0)&(currentH<=AFMImage@data$h[nearVertexId])) return(TRUE)
  #x-1 y+1
  nearVertexId<-getVertexId(AFMImage,x-1,y+1)
  if ((nearVertexId>0)&(currentH<=AFMImage@data$h[nearVertexId])) return(TRUE)
  #x-1 y
  nearVertexId<-getVertexId(AFMImage,x-1,y)
  if ((nearVertexId>0)&(currentH<=AFMImage@data$h[nearVertexId])) return(TRUE)
  #x-1 y-1
  nearVertexId<-getVertexId(AFMImage,x-1,y-1)
  if ((nearVertexId>0)&(currentH<=AFMImage@data$h[nearVertexId])) return(TRUE)
  #x y-1
  nearVertexId<-getVertexId(AFMImage,x,y-1)
  if ((nearVertexId>0)&(currentH<=AFMImage@data$h[nearVertexId])) return(TRUE)
  #x+1 y-1
  nearVertexId<-getVertexId(AFMImage,x+1,y-1)
  if ((nearVertexId>0)&(currentH<=AFMImage@data$h[nearVertexId])) return(TRUE)
  return(FALSE)
}

#' gridIgraphPlot
#'
#' \code{gridIgraphPlot} plots the graph on a grid layout matching the image coordinates
#'
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @param g the network
#' @author M.Beauvais
#' @export
gridIgraphPlot<-function(AFMImage, g){
  # define the layout matrix
  coordinatesVector<-getNetworkGridLayout(AFMImage, V(g)$name)
  l<-matrix(coordinatesVector$y ,byrow = TRUE)
  l<-cbind(l, coordinatesVector$x)
  plot(g, layout=l,
       vertex.shape="circle", vertex.size=2, vertex.label=NA,
       vertex.color="red", vertex.frame.color="red",
       edge.color="grey"
  )
}

#' Calculate iGraph from AFMImage
#'
#' \code{calculateIgraph} returns the \code{\link{igraph}} built from the heights of the \code{\link{AFMImage}}
#'
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @param AFMImageNetworksAnalysis an \code{\link{AFMImageNetworksAnalysis}} from Atomic Force Microscopy
#' @author M.Beauvais
#' @export
calculateIgraph<-function(AFMImage, AFMImageNetworksAnalysis) {
  if (missing(AFMImageNetworksAnalysis)) {
    AFMImageNetworksAnalysis<-NULL
  }
  graphicalUpdate<-FALSE
  graphicalCounter<-0
  if (!is.null(AFMImageNetworksAnalysis)&&
      !is.null(AFMImageNetworksAnalysis@updateProgress)&&
      is.function(AFMImageNetworksAnalysis@updateProgress)&&
      !is.null(AFMImageNetworksAnalysis@updateProgress())) {
    graphicalUpdate<-TRUE
totalLength<-AFMImage@samplesperline*(AFMImage@lines-1) } if (graphicalUpdate) { AFMImageNetworksAnalysis@updateProgress(message="1/2 - Generating edges list", value=0) } print(paste("Generating edge list")) counter<-1 #edgeList=data.table() edgeList <- vector("list", AFMImage@samplesperline*AFMImage@lines+1) for (x in seq(1: AFMImage@samplesperline)) { for (y in seq(1: (AFMImage@lines-1))) { currentVertexId<-getVertexId(AFMImage,x,y) if (existsEdge(AFMImage, currentVertexId)) { #edgeList<-rbind(edgeList, getSurroundingVertexesList(AFMImage,x,y)) edgeList[[counter]] <- getSurroundingVertexesList(AFMImage,x,y) counter<-counter+1 } if (graphicalUpdate) { graphicalCounter<-graphicalCounter+1 if (graphicalCounter/100==floor(graphicalCounter/100)) { value<-graphicalCounter / totalLength text <- paste0(round(graphicalCounter, 2),"/",totalLength) AFMImageNetworksAnalysis@updateProgress(value= 0, detail = text) } } } } if (graphicalUpdate) { AFMImageNetworksAnalysis@updateProgress(message="2/2 - Generating network", value=0) } newEdgeList<-rbindlist(edgeList) el=as.matrix(newEdgeList) print(paste("Creating graph")) g<-graph_from_edgelist(el[,1:2], directed=FALSE) print(paste("Created",counter,"vertices")) AFMImageNetworksAnalysis@originalGraph<-g return(g) } #' getListOfDiameters #' #' \code{getListOfDiameters} return #' #' @param g list of igraph networks #' @author M.Beauvais #' @export getListOfDiameters<-function(g) { LIST_OF_DIAMETERS = c() listOfGraph=decompose(g) for(g in listOfGraph){ LIST_OF_DIAMETERS=c(LIST_OF_DIAMETERS, diameter(g, directed = FALSE, unconnected = TRUE, weights = NULL)) } return(LIST_OF_DIAMETERS) } #' canBeRemoved #' #' \code{canBeRemoved} return #' #' @param vertexId a vertex id #' @param g a igraph #' @param allVertices list of all vertices #' @param DEGREE_LIMIT_FOR_CANDIDATE_VERTICE degree #' #' @author M.Beauvais #' @export canBeRemoved<-function(vertexId, g, allVertices, DEGREE_LIMIT_FOR_CANDIDATE_VERTICE) { avList<-adjacent_vertices(g, v=c(vertexId), mode = c("all")) avListNew<-unique(avList[[vertexId]]$name) found<-NULL if (nrow(allVertices[, c("found"):=vertexId %in% avListNew & degree<(DEGREE_LIMIT_FOR_CANDIDATE_VERTICE+1)][found==TRUE])>0) { return(FALSE) }else{ return(TRUE) } } #' calculateNetworkSkeleton #' #' \code{calculateNetworkSkeleton} return #' #' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy #' @param AFMImageNetworksAnalysis an \code{\link{AFMImageNetworksAnalysis}} from Atomic Force Microscopy #' @author M.Beauvais #' @export calculateNetworkSkeleton<-function(AFMImage, AFMImageNetworksAnalysis) { if (missing(AFMImageNetworksAnalysis)) { AFMImageNetworksAnalysis<-NULL return(new("list")) } g<-AFMImageNetworksAnalysis@originalGraph graphicalUpdate<-FALSE graphicalCounter<-0 if (!is.null(AFMImageNetworksAnalysis)&& !is.null(AFMImageNetworksAnalysis@updateProgress)&& is.function(AFMImageNetworksAnalysis@updateProgress)&& !is.null(AFMImageNetworksAnalysis@updateProgress())) { graphicalUpdate<-TRUE totalLength<-length(V(g)) } DEGREE_LIMIT_FOR_CANDIDATE_VERTICE=4 NUMBER_OF_NETWORKS = length(decompose(g)) LIST_OF_DIAMETERS<-getListOfDiameters(g) print(LIST_OF_DIAMETERS) # distance_table(g, directed = FALSE) # coreness(g) verticesThatCantBeRemovedList=c() print(paste("starting with ", length(V(g)), " vertices")) if (graphicalUpdate) { AFMImageNetworksAnalysis@updateProgress(message="1/1 - removing vertices and edges", value=0) } continueExploration<-TRUE while(continueExploration) { edgeList<-V(g)$name 
uniqueVerticesList<-unique(edgeList)
    # degree of each node
    edgeDegreeList<-degree(g, v=uniqueVerticesList, mode = c("all"), loops = FALSE, normalized = FALSE)
    # nodes sorted by increasing degree
    allVertices<-data.table(vertexId=uniqueVerticesList, degree=edgeDegreeList)
    allVertices<-allVertices[order(degree)]
    # keep only the candidates whose degree exceeds the limit (low-degree vertices must be kept)
    listOfCandidateVertices<-allVertices[degree>DEGREE_LIMIT_FOR_CANDIDATE_VERTICE]
    listOfCandidateVertices<-listOfCandidateVertices[!listOfCandidateVertices$vertexId %in% verticesThatCantBeRemovedList]
    continueExploration<-FALSE
    if (nrow(listOfCandidateVertices)>0) {
      for (vi in seq(1:nrow(listOfCandidateVertices))){
        onevertexId=listOfCandidateVertices$vertexId[vi]
        if (canBeRemoved(onevertexId, g=g, allVertices=allVertices, DEGREE_LIMIT_FOR_CANDIDATE_VERTICE=DEGREE_LIMIT_FOR_CANDIDATE_VERTICE)) {
          vId<-listOfCandidateVertices$vertexId[vi]
          # store the list of adjacent vertices of the node before deleting it
          avList<-unique(adjacent_vertices(g, v=c(vId), mode = c("all"))[[vId]]$name)
          g<-delete_vertices(g, listOfCandidateVertices$vertexId[vi])
          continueExploration<-TRUE
          NEW_LIST_OF_DIAMETERS=getListOfDiameters(g)
          # did the vertex removal split the network or change the diameters?
          if ((length(decompose(g))>NUMBER_OF_NETWORKS)||(!identical(LIST_OF_DIAMETERS,NEW_LIST_OF_DIAMETERS))) {
            # the removal changed the topology: restore the vertex and its edges
            print (paste("should not have removed", vId))
            verticesThatCantBeRemovedList=c(verticesThatCantBeRemovedList, listOfCandidateVertices$vertexId[vi])
            g<-g+vertices(as.numeric(vId))
            listOfEdges=c()
            for(j in seq(1,length(avList))) {
              listOfEdges=c(listOfEdges, vId, avList[j], avList[j],vId)
            }
            g<-g+edges(listOfEdges)
          }else{
            NEW_LIST_OF_DIAMETERS=getListOfDiameters(g)
            if ((!identical(LIST_OF_DIAMETERS,NEW_LIST_OF_DIAMETERS))) {
              print (paste("should not have removed", vId))
              verticesThatCantBeRemovedList=c(verticesThatCantBeRemovedList, listOfCandidateVertices$vertexId[vi])
              g<-g+vertices(as.numeric(vId))
              listOfEdges=c()
              for(j in seq(1,length(avList))) {
                listOfEdges=c(listOfEdges, vId, avList[j], avList[j],vId)
              }
              g<-g+edges(listOfEdges)
            }
            break
          }
        }
        if (graphicalUpdate) {
          graphicalCounter<-graphicalCounter+1
          value<-graphicalCounter / totalLength
          text <- paste0(round(graphicalCounter, 2),"/",totalLength)
          AFMImageNetworksAnalysis@updateProgress(value= 0, detail = text)
        }
      }
    }else{
      continueExploration<-FALSE
    }
  }
  print(paste("ending with ", length(V(g)), " vertices"))
  AFMImageNetworksAnalysis@skeletonGraph<-g
  return(AFMImageNetworksAnalysis)
}

#' Calculate topology image (TBC)
#'
#' \code{getTopologyAFMImage} returns the global topological distance
#'
#' @param BinaryAFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy in a binary format (0 or 1 values for the heights)
#' @param AFMImageNetworksAnalysis an \code{\link{AFMImageNetworksAnalysis}} from Atomic Force Microscopy
#' @author M.Beauvais
#' @export
getTopologyAFMImage<-function(BinaryAFMImage, AFMImageNetworksAnalysis){
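  # For every pixel (x, y), the topology value is the sum of the inverse distances to
  # all foreground pixels (h != 0) of the binary image; bX and bY below hold the
  # per-axis physical distances from (x, y) to every pixel, and nm their inverse norm.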
filterVector<-unlist(BinaryAFMImage@data$h) topology<-c() for (x in 1:BinaryAFMImage@samplesperline) { for (y in 1:BinaryAFMImage@lines) { if(x==1) { bX=seq(from=0, to=BinaryAFMImage@samplesperline-1, by=1) }else{ if (x==BinaryAFMImage@samplesperline) { bX=seq(from=x-1, to=0, by=-1) }else{ bX=seq(from=x-1, to=0, by=-1) bX=c(bX, seq(from=1, to=BinaryAFMImage@samplesperline-x, by=1)) } } # bX if(y==1) { bY=seq(from=0, to=BinaryAFMImage@lines-1, by=1) }else{ if (y==BinaryAFMImage@lines) { bY=seq(from=y-1, to=0, by=-1) }else{ bY=seq(from=y-1, to=0, by=-1) bY=c(bY, seq(from=1, to=BinaryAFMImage@lines-y, by=1)) } } # bY bX=BinaryAFMImage@hscansize*bX bY=BinaryAFMImage@vscansize*bY bX<-matrix(rep(bX,BinaryAFMImage@lines), ncol=BinaryAFMImage@lines, byrow=TRUE ) bY<-matrix(rep(bY,BinaryAFMImage@samplesperline), ncol=BinaryAFMImage@samplesperline, byrow=FALSE ) nm=as.numeric(1/sqrt(bX^2+bY^2)) nm[is.infinite(nm)]<-0 #nm*filterVector res<-sum(nm*filterVector) topology<-c(topology,res) #print(res) } } scanby<-BinaryAFMImage@scansize/BinaryAFMImage@samplesperline endScan<-BinaryAFMImage@scansize*(1-1/BinaryAFMImage@samplesperline) topologyAFMImage<-AFMImage( data = data.table(x = rep(seq(0,endScan, by= scanby), times = BinaryAFMImage@lines), y = rep(seq(0,endScan, by= scanby), each = BinaryAFMImage@samplesperline), h = topology), samplesperline = BinaryAFMImage@samplesperline, lines = BinaryAFMImage@lines, vscansize = BinaryAFMImage@vscansize, hscansize = BinaryAFMImage@hscansize, scansize = BinaryAFMImage@scansize, fullfilename = BinaryAFMImage@fullfilename ) return(topologyAFMImage) } #' get a segment of points thanks to Bresenham line algorithm #' #' \code{getBresenham2DSegment} return the Bresenham segment in 2D from extremities coordinates #' #' @param x1 abscissa coordinates of the first point #' @param y1 ordinate coordinates of the first point #' @param x2 abscissa coordinates of the second point #' @param y2 ordinate coordinates of the second point #' @return a data.table of points - data.table(x, y) #' @author M.Beauvais #' @export getBresenham2DSegment<-function(x1, y1, x2, y2) { resX=c() resY=c() dx<-x2-x1 dy<-y2-y1 #print(paste("getBresenham2DSegment",dx,dy)) if (dx !=0) { if (dx > 0) { if (dy !=0) { if (dy > 0) { if (dx >= dy) { e<-dx dx <- e * 2 dy <- dy * 2 while(TRUE){ resX=c(resX,x1); resY=c(resY, y1) x1 <- x1 + 1 if (x1 == x2) break e <- e - dy if (e < 0) { y1 <- y1 + 1 e <- e + dx } } } else { e <- dy dy <- e * 2 dx <- dx * 2 while(TRUE){ resX=c(resX,x1); resY=c(resY, y1) y1 <- y1 + 1 if (y1 == y2) break e <- e - dx if (e < 0) { x1 <- x1 + 1 e <- e + dy } } } }else if (dy < 0){ # dy < 0 (et dx > 0) if (dx >= -dy) { e <- dx dx <- e * 2 dy <- dy * 2 while(TRUE){ resX=c(resX,x1); resY=c(resY, y1) x1 <- x1 + 1 if (x1 == x2) break e <- e + dy if (e < 0) { y1 <- y1 - 1 e <- e + dx } } } else{ e <- dy dy <- e * 2 dx <- dx * 2 #print(c(e,dy,dx)) while(TRUE){ resX=c(resX,x1); resY=c(resY, y1) #print(c(x1, y1)) y1 <- y1 - 1 if (y1 == y2) break e <- e - dx #print(paste(c("e",e))) if (e < 0) { x1 <- x1 + 1 if(x1>x2) x1=x2 # MB !!! 
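                # rebalance the Bresenham decision variable after the minor-axis step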
e <- e - dy
              }
            }
          }
        }
      } else if (dy == 0){ # dy = 0 (and dx > 0)
        while(x1 != x2) {
          resX=c(resX,x1); resY=c(resY, y1)
          x1 <- x1 + 1
        }
      }
    }else if (dx<0) { # dx < 0
      dy <- y2 - y1
      if (dy != 0) {
        if (dy > 0) {
          if (-dx >= dy) {
            e <- dx
            dx <- e * 2
            dy <- dy * 2
            while(TRUE){
              resX=c(resX,x1); resY=c(resY, y1)
              x1 <- x1 - 1
              if (x1 == x2) break
              e <- e + dy
              if (e >= 0) {
                y1 <- y1 + 1
                e <- e + dx
              }
            }
          }else{
            e <- dy
            dy <- e * 2
            dx <- dx * 2
            while(TRUE){
              resX=c(resX,x1); resY=c(resY, y1)
              y1 <- y1 + 1
              if ( y1 == y2) break
              e <- e + dx
              if (e <= 0) {
                x1 <- x1 - 1
                e <- e + dy
              }
            }
          }
        }else if(dy <0) { # dy < 0 (and dx < 0)
          if (dx <= dy) {
            e <- dx
            dx <- e * 2
            dy <- dy * 2
            while(TRUE){
              resX=c(resX,x1); resY=c(resY, y1)
              x1 <- x1 - 1
              if (x1 == x2) break
              e <- e - dy
              if (e >= 0) {
                y1 <- y1 - 1
                e <- e + dx
              }
            }
          } else {
            e <- dy
            dy <- e * 2
            dx <- dx * 2
            while(TRUE){
              resX=c(resX,x1); resY=c(resY, y1)
              y1 <- y1 - 1
              if ( y1 == y2 ) break
              e <- e - dx
              if (e >= 0) {
                x1 <- x1 - 1
                e <- e + dy
              }
            }
          }
        }
      } else if (dy==0) { # dy = 0 (and dx < 0)
        while(x1!=x2) {
          resX=c(resX,x1); resY=c(resY, y1)
          x1 <- x1 - 1
        }
      }
    }
  } else if (dx==0) { # dx = 0
    dy <- y2 - y1
    if (dy != 0) {
      if (dy > 0) {
        while(y1 != y2) {
          resX=c(resX,x1); resY=c(resY, y1)
          y1 <- y1 + 1
        }
      } else if (dy < 0) { # dy < 0 (and dx = 0)
        while(y1!=y2) {
          resX=c(resX,x1); resY=c(resY, y1)
          y1 <- y1 - 1
        }
      }
    }
  }
  resX=c(resX,x2); resY=c(resY, y2)
  pts = data.table(x=resX, y=resY)
  return(pts)
}

#' thin an image in matrix format
#'
#' @param imageMatrix a matrix of an AFM image
#' @export
#' @author M.Beauvais
thinImage <- function(imageMatrix) {
  # element-wise absolute difference between two equally sized matrices;
  # the original loop-based version subtracted matrix1 from itself and always returned zeros
  absDiff <- function(matrix1,matrix2) {
    return(abs(matrix1-matrix2))
  }
  countNonZero <- function(inputMatrix) {
    return(length(inputMatrix[inputMatrix > 0]))
  }
  # one sub-iteration of a Zhang-Suen style thinning pass (iter is 0 or 1)
  thinningIteration <- function(imageMatrix, iter) {
    imageInput <- imageMatrix
    r <- nrow(imageInput) - 1
    c <- ncol(imageInput) - 1
    for(i in 2:r) {
      for(j in 2:c) {
        p2 <- imageInput[i-1, j]
        p3 <- imageInput[i-1, j+1]
        p4 <- imageInput[i, j+1]
        p5 <- imageInput[i+1, j+1]
        p6 <- imageInput[i+1, j]
        p7 <- imageInput[i+1, j-1]
        p8 <- imageInput[i, j-1]
        p9 <- imageInput[i-1, j-1]
        # A: number of 0 -> 1 transitions in the circular sequence p2,p3,...,p9,p2
        A <- (p2 == 0 && p3 == 1) + (p3 == 0 && p4 == 1) +
          (p4 == 0 && p5 == 1) + (p5 == 0 && p6 == 1) +
          (p6 == 0 && p7 == 1) + (p7 == 0 && p8 == 1) +
          (p8 == 0 && p9 == 1) + (p9 == 0 && p2 == 1)
        # B: number of non-zero neighbours
        B <- p2 + p3 + p4 + p5 + p6 + p7 + p8 + p9
        if(iter == 0){
          m1 <- (p2 * p4 * p6)
          m2 <- (p4 * p6 * p8)
        } else {
          m1 <- (p2 * p4 * p8)
          m2 <- (p2 * p6 * p8)
        }
        if (A == 1 && (B >= 2 && B <= 6) && m1 == 0 && m2 == 0) {
          imageInput[i,j] <- 0
        }
      }
    }
    return(imageInput)
  }
  im <- imageMatrix
  prev <- im
  # repeat the two sub-passes until the image stops changing
  repeat {
    im <- thinningIteration(im, 0)
    im <- thinningIteration(im, 1)
    diff <- absDiff(im, prev)
    prev <- im
    if(countNonZero(diff) <= 0) {
      break
    }
  }
  return(im)
}

#' identify largest circles in binary image
#'
#' \code{identifyNodesWithCircles} identifies the nodes of the network as the centers of the largest circles that can be fitted inside the binary image
#'
#' @param AFMImageNetworksAnalysis a \code{\link{AFMImageNetworksAnalysis}}
#' @param ...
cl: a cluster object from the parallel package #' @return AFMImageNetworksAnalysis the \code{\link{AFMImageNetworksAnalysis}} instance #' @author M.Beauvais #' @export identifyNodesWithCircles<-function(...,AFMImageNetworksAnalysis) { force(AFMImageNetworksAnalysis) BIGGER_CIRCLE_RADIUS<-4 BIGGER_CIRCLE_RADIUS_MULTILPLIER<-2 SAMPLE_ON_THIN_PORTIONS<-25 # percents MAX_DISTANCE<-64 AREA_MIN <-25 CLUSTER_COUNT_MIN <- 45 CIRCLE_RADIUS_INIT <- 25 cluster<-node<-mindist<-maxdist<-keep<-NULL nbOfCircles<-maxArea<-h<-NULL clusterLon<-clusterLat<-cluster<-IDX<-keepThinPoints<-meandist<-NULL #cl<-cl #spDistsN1<-nbOfCircles<-maxArea<-h<-NULL args<-names(list(...)) print(args) if (is.null(args)) { clExist<-FALSE }else{ clExist<-c(match('cl',args)!=-1) cl<-cl } if (clExist) { print("using parallel") requireNamespace("parallel") } newCircleAFMImage<-copy(AFMImageNetworksAnalysis@binaryAFMImage) newCircleAFMImage2<-copy(AFMImageNetworksAnalysis@binaryAFMImage) circleRadius<-CIRCLE_RADIUS_INIT iteration<-0 rm(avgDT) while(circleRadius>0) { iteration=iteration+1 circleRadius=circleRadius-1 blockSize<-circleRadius*2+1 print(paste0("circleRadius:",circleRadius)) if ((blockSize>newCircleAFMImage@samplesperline)|((blockSize-1)>newCircleAFMImage@lines)) { print(paste0("too big blockSize", blockSize)) }else{ if (circleRadius>0) { circleCenter<-c(circleRadius, circleRadius) circlePts = SpatialPoints(cbind(rep(1:(blockSize),blockSize), rep(1:(blockSize),1,each= blockSize))) # pts circlenm <- sp::spDistsN1(pts=circlePts, pt=circleCenter, longlat=FALSE) # nm # nm<circleRadius # find all blocks in image # and check if the circle with biggest radius inside the block exists in the image # if yes, set all the height of all the points inside circle to 10 binaryAFMImageMatrix<-matrix(newCircleAFMImage@data$h, ncol=newCircleAFMImage@samplesperline) newBlockAFMImageMatrix<-matrix(newCircleAFMImage@data$h, ncol=newCircleAFMImage@samplesperline) allXY<-expand.grid(1:(newCircleAFMImage@samplesperline-blockSize), 1:(newCircleAFMImage@lines-blockSize)) orderXY<-sample.int(nrow(allXY), nrow(allXY), replace = FALSE) allXY<-data.table(allXY) colnames(allXY)<-c("x","y") # for (x in seq(1:(newCircleAFMImage@samplesperline-blockSize))) { # for (y in seq(1:(newCircleAFMImage@lines-blockSize))) { for (indexXY in seq(1:length(orderXY))) { x<-allXY[orderXY[indexXY], ]$x y<-allXY[orderXY[indexXY], ]$y #print(paste(x,y)) #heights<-newCircleAFMImage@data$h tempMatrix<-binaryAFMImageMatrix[x:(x+blockSize),y:(y+blockSize)] if ((!anyNA(as.vector(tempMatrix)[circlenm<=circleRadius]))& (all(as.vector(tempMatrix)[circlenm<=circleRadius] == 1) == TRUE)) { print (paste(x,y)) newBlockAFMImageMatrix[x+circleRadius, y+circleRadius]<-10 newBlockAFMImage<-copy(newCircleAFMImage) newBlockAFMImage@data$h<-as.vector(newBlockAFMImageMatrix) #displayIn3D(newBlockAFMImage, noLight=TRUE) newBlockAFMImage2<-copy(newBlockAFMImage) newBlockAFMImage2@data$h[newBlockAFMImage2@data$h<3]<-0 #displayIn3D(newBlockAFMImage2, noLight=TRUE) newBlockAFMImageMatrix2<-matrix(newBlockAFMImage2@data$h, ncol=newBlockAFMImage2@samplesperline) # get coordinates of non 0 elements nonZeroElements<-which(newBlockAFMImageMatrix2!=0,arr.ind = T) #print(paste("circleRadius",circleRadius)) lat<-nonZeroElements[,1] lon<-nonZeroElements[,2] nodesToBeRemoved=data.table(lon,lat,circleRadius) #print(nodesToBeRemoved) newCircleAFMImage@data$h[nodesToBeRemoved$lon+1+nodesToBeRemoved$lat*newCircleAFMImage@samplesperline]<-0 for(oneCenter in seq(1, nrow(nodesToBeRemoved))) { 
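            # wipe each detected node plus a safety margin (BIGGER_CIRCLE_RADIUS) from the
            # working image so that the same region cannot be detected again as another node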
center<-c(nodesToBeRemoved[oneCenter,]$lat, nodesToBeRemoved[oneCenter,]$lon) # Use a bigger circle that will be removed from image # in order to exclude other nodes that could be very near circleRadius2=circleRadius+BIGGER_CIRCLE_RADIUS #circleRadius2=circleRadius blockSize2=circleRadius2*BIGGER_CIRCLE_RADIUS_MULTILPLIER+1 pts = SpatialPoints(cbind(rep(0:(blockSize2-1),blockSize2)+center[1]-circleRadius2, rep(0:(blockSize2-1),1,each= blockSize2)+center[2]-circleRadius2)) pts<-pts[pts$coords.x1>0&pts$coords.x1<newCircleAFMImage2@lines&pts$coords.x2>0&pts$coords.x2<newCircleAFMImage2@samplesperline] nm <- sp::spDistsN1(pts=pts, pt=center, longlat=FALSE) # points that are inside the circle listOfPointsInsideCircle<-pts[nm<=circleRadius2] newCircleAFMImage@data$h[listOfPointsInsideCircle$coords.x1+1+(listOfPointsInsideCircle$coords.x2)*newCircleAFMImage@samplesperline]<-0 #displayIn3D(newCircleAFMImage, noLight=TRUE) } binaryAFMImageMatrix<-matrix(newCircleAFMImage@data$h, ncol=newCircleAFMImage@samplesperline) newBlockAFMImageMatrix<-matrix(newCircleAFMImage@data$h, ncol=newCircleAFMImage@samplesperline) # displayIn3D(newCircleAFMImage2, noLight=TRUE) if (!exists("avgDT")) { avgDT<-nodesToBeRemoved avgDT2<-copy(avgDT) }else{ avgDT2<-nodesToBeRemoved avgDT<-rbind(avgDT,avgDT2) } #print(avgDT2) #print(paste("circleRadius=",circleRadius,"- nb of centers",nrow(avgDT2))) for(oneCenter in seq(1, nrow(avgDT2))) { center<-c(avgDT2[oneCenter,]$lat, avgDT2[oneCenter,]$lon) #center<-c(0,0) #pts = SpatialPoints(cbind(rep(1:blockSize,blockSize)+center[1]-circleRadius, rep(1:blockSize,1,each= blockSize)+center[2]-circleRadius)) pts = SpatialPoints(cbind(rep(0:(blockSize2-1),blockSize2)+center[1]-circleRadius2, rep(0:(blockSize2-1),1,each= blockSize2)+center[2]-circleRadius2)) pts<-pts[pts$coords.x1>0&pts$coords.x1<newCircleAFMImage2@lines&pts$coords.x2>0&pts$coords.x2<newCircleAFMImage2@samplesperline] nm <- sp::spDistsN1(pts=pts, pt=center, longlat=FALSE) # points that are inside the circle listOfPointsInsideCircle<-pts[nm<=circleRadius] #print(listOfPointsInsideCircle$coords.x1) newCircleAFMImage2@data$h[listOfPointsInsideCircle$coords.x1+1+(listOfPointsInsideCircle$coords.x2)*newCircleAFMImage2@samplesperline]<-newCircleAFMImage2@samplesperline+iteration*10 } } } } } } # displayIn3D(newCircleAFMImage2, noLight=TRUE) # displayIn3D(newCircleAFMImage, noLight=TRUE) if (AFMImageNetworksAnalysis@smallBranchesTreatment) { # finding the extra small nodes untreatedPoints<-newCircleAFMImage@data[h!=0] islandsDT<-cbind(lon=untreatedPoints$y*newCircleAFMImage@lines/newCircleAFMImage@vscansize, lat=untreatedPoints$x*newCircleAFMImage@samplesperline/newCircleAFMImage@hscansize) DBSCAN <- dbscan(islandsDT, eps = 1.5, MinPts = 3, borderPoints=FALSE) #plot(untreatedPoints$y, untreatedPoints$x, col = DBSCAN$cluster, pch = 20) #plot(islandsDT, col = DBSCAN$cluster, pch = 20) islandsDT<-data.table(islandsDT,cluster=DBSCAN$cluster) setkeyv(islandsDT, "cluster") isolatedIslandsDT<-islandsDT[cluster==0,] isolatedIslandsDT islandsDT<-islandsDT[cluster!=0,] islandsDT identifyLinksBetweenClustersAndExistingNodes<-function(clusterN, AFMImageNetworksAnalysis, MAX_DISTANCE, avgDT, islandsDT) { requireNamespace("data.table") requireNamespace("sp") requireNamespace("AFM") clusterLon<-clusterLat<-cluster<-IDX<-keepThinPoints<-meandist<-NULL print(clusterN) resDT<-data.table(cluster=c(0), clusterLon=c(0), clusterLat=c(0), existingNodeLon=c(0), existingNodeLat=c(0)) centers1<-islandsDT[islandsDT$cluster %in% clusterN,] # define the 
points in the circle otherNodes<-copy(avgDT) for (center_index in seq(1,nrow(centers1))) { center1<-centers1[center_index,] circleRadius1<-1 #otherNodes<-allNodesAsSpatialPoints[!(allNodesAsSpatialPoints$coords.x1==center1$lon&allNodesAsSpatialPoints$coords.x2==center1$lat)] minLat<- ifelse((center1$lat-MAX_DISTANCE)>0, center1$lat-MAX_DISTANCE, 0) maxLat<- ifelse((center1$lat+MAX_DISTANCE)<AFMImageNetworksAnalysis@binaryAFMImage@lines, center1$lat+MAX_DISTANCE, AFMImageNetworksAnalysis@binaryAFMImage@lines-1) minLon<- ifelse((center1$lon-MAX_DISTANCE)>0, center1$lon-MAX_DISTANCE, 0) maxLon<- ifelse((center1$lon+MAX_DISTANCE)<AFMImageNetworksAnalysis@binaryAFMImage@samplesperline, center1$lon+MAX_DISTANCE, AFMImageNetworksAnalysis@binaryAFMImage@samplesperline-1) otherNodes2<-copy(avgDT[lon>=minLon&lon<=maxLon&lat>=minLat&lat<=maxLat,]) otherNodes2$dist<-sp::spDistsN1(pts=matrix(c(otherNodes2$lon, otherNodes2$lat), ncol=2), pt=c(center1$lon, center1$lat), longlat=FALSE) #otherNodes otherNodes2<-otherNodes2[with(otherNodes2, order(otherNodes2$dist)), ] otherNodes2<-otherNodes2[otherNodes2$dist<MAX_DISTANCE,] if (nrow(otherNodes2)>0) { for (centerId2Nb in seq(1, nrow(otherNodes2))) { pt<-otherNodes2[centerId2Nb,] if (AreNodesConnected(AFMImageNetworksAnalysis@binaryAFMImage, center1, circleRadius1, data.table(lon=pt$lon, lat=pt$lat), pt$circleRadius)) { #print("yes") resDT=rbind(resDT, data.table(cluster=clusterN, clusterLon=center1$lon, clusterLat=center1$lat, existingNodeLon=pt$lon, existingNodeLat=pt$lat)) } } } } resDT<-resDT[-1,] return(resDT) } print(paste("number of nodes=", nrow(avgDT))) print(paste("number of clusters=", length(unique(islandsDT$cluster)))) if(exists("avgDT")) { start.time <- Sys.time() print(start.time) if(clExist) { parallel::clusterEvalQ(cl , c(library("data.table"),library("sp"), library("AFM"))) parallel::clusterExport(cl, c("AFMImageNetworksAnalysis", "MAX_DISTANCE", "avgDT", "islandsDT"), envir=environment()) res<-parallel::parLapply(cl, unique(islandsDT$cluster),identifyLinksBetweenClustersAndExistingNodes, AFMImageNetworksAnalysis, MAX_DISTANCE, avgDT, islandsDT) }else{ res<-lapply(unique(islandsDT$cluster),identifyLinksBetweenClustersAndExistingNodes, AFMImageNetworksAnalysis, MAX_DISTANCE, avgDT, islandsDT) } end.time <- Sys.time() time.taken <- end.time - start.time print(paste0("time.taken: ",time.taken)) resDT<-rbindlist(res) connectedIslandsDT<-islandsDT[cluster %in% unique(resDT$cluster),] # plot existing clusters #plot( connectedIslandsDT$lon, connectedIslandsDT$lat,col = connectedIslandsDT$cluster, pch = 20) #plot( islandsDT$lon, islandsDT$lat,col = islandsDT$cluster, pch = 20) # calculate distance between points in the cluster and existing nodes resDT$node<-paste0(resDT$existingNodeLon,"-",resDT$existingNodeLat) resDT resDT$dist<-sapply(1:nrow(resDT),function(i) sp::spDistsN1(pts=as.matrix(resDT[i,2:3,with=FALSE]),pt=as.matrix(resDT[i,4:5,with=FALSE]),longlat=FALSE)) resDT # find the closest existing node resDT[,meandist:=mean(dist), by=list(cluster,node)] resDT[,mindist:=min(meandist), by=list(cluster)] setkey(resDT, meandist, mindist) resDT$keep<-sapply(1:nrow(resDT),function(i) if (resDT[i,8,with=FALSE] == resDT[i,9,with=FALSE]) return(TRUE) else return(FALSE)) resDT #MB TODO # more in the width or in the height ? 
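    # clusters are then characterised by their bounding box: wide clusters are sampled
    # along the longitude axis, tall ones along the latitude axis (clusterChar$shape below)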
print("start spliting segment regularly...") clusterChar = data.table ( cluster = unique(islandsDT$cluster), minLon = islandsDT[, min(lon), by=cluster]$V1, maxLon = islandsDT[, max(lon), by=cluster]$V1, minLat = islandsDT[, min(lat), by=cluster]$V1, maxLat = islandsDT[, max(lat), by=cluster]$V1) clusterChar$area<-(clusterChar$maxLat-clusterChar$minLat)*(clusterChar$maxLon-clusterChar$minLon) clusterChar$count<-islandsDT[,.N, by=cluster]$N clusterChar$shape<-sapply(1:nrow(clusterChar),function(i) { if ((clusterChar[i,]$maxLon-clusterChar[i,]$minLon)>(clusterChar[i,]$maxLat-clusterChar[i,]$minLat)) { return("width") }else{ return("height") } }) print(clusterChar) rm(resDT6) i=3 # 30 % in regular space resDT6<-lapply(1:nrow(clusterChar),function(i) { if ((clusterChar[i,]$area <= AREA_MIN)|(clusterChar[i,]$count <= CLUSTER_COUNT_MIN)) { # if sample not extremely small if (!clusterChar[i,]$count <= 3) { # sample only one point if the cluster is with small area clusterN<-clusterChar[i,]$cluster resDT2<-islandsDT[cluster %in% clusterN,] #print(resDT2) sampleC<-sample(1:nrow(resDT2),1) return(data.table(cluster=clusterN, lon = resDT2[sampleC,]$lon, lat= resDT2[sampleC,]$lat)) } }else { # every 5 pixel if (clusterChar[i,]$shape == "height") { totalHeight<-clusterChar[i,]$maxLat-clusterChar[i,]$minLat #print(totalHeight) sSample = floor(SAMPLE_ON_THIN_PORTIONS*totalHeight/100) if (sSample>0) { if (sSample>5) sSample <-5 latVector <- seq(from = clusterChar[i,]$minLat + 1, to = clusterChar[i,]$maxLat - 1, by = sSample ) latVector <- floor(latVector) #j=3 resDT5<-lapply(1:length(latVector),function(j, clusterN = clusterChar[i,]$cluster) { resDT2<-islandsDT[lat %in% latVector[j] & cluster %in% clusterN,] #print(resDT2) avgDTLon<-floor(mean(resDT2$lon)) #print(paste("cluster ", clusterN, "keep ",avgDTLon, latVector[j] )) return(data.table(cluster=clusterN, lon = avgDTLon, lat= latVector[j])) }) print(resDT5) return(rbindlist(resDT5)) } }else{ totalWidth<-clusterChar[i,]$maxLon-clusterChar[i,]$minLon #print(totalHeight) sSample = floor(SAMPLE_ON_THIN_PORTIONS*totalWidth/100) if (sSample>0) { lonVector <- seq(from = clusterChar[i,]$minLon + 1, to = clusterChar[i,]$maxLon - 1, by = sSample ) lonVector <- floor(lonVector) #j=1 resDT5<-lapply(1:length(lonVector),function(j, clusterN = clusterChar[i,]$cluster) { resDT2<-islandsDT[lon %in% lonVector[j] & cluster %in% clusterN,] #print(resDT2) avgDTLat<-floor(mean(resDT2$lat)) #print(paste("cluster ", clusterN, "keep ",avgDTLon, lonVector[j] )) return(data.table(cluster=clusterN, lon = lonVector[j], lat= avgDTLat)) } ) print(resDT5) return(rbindlist(resDT5)) } } } }) resDT6<-rbindlist(resDT6) resDT6<-unique(resDT6) resDT6<-resDT6[complete.cases(resDT6),] resDT6<-resDT6[lon != 0 & lon != (AFMImageNetworksAnalysis@binaryAFMImage@samplesperline-1) & lat!=0 & lat!=(AFMImageNetworksAnalysis@binaryAFMImage@lines-1) ,] resDT6 # good vizualisationof intermediary results # resDT7<-copy(resDT6) # resDT7$cluster<-rep(888, nrow(resDT7)) # resDT7 # islandsDT2<-rbind(islandsDT, resDT7) #plot( islandsDT2$lon, islandsDT2$lat,col = islandsDT2$cluster, pch = 20) #plot(untreatedPoints$y, untreatedPoints$x, col = DBSCAN$cluster, pch = 20) # get the list of closest existing nodes print("start defining the farthest points...") setkey(resDT, cluster) resDT2<-copy(unique(resDT)) resDT2<-data.table(cluster=resDT2$cluster, existingNodeLon=resDT2$existingNodeLon,existingNodeLat=resDT2$existingNodeLat) setkey(resDT2, cluster) resDT2<-unique(resDT2) # get the list of farthest points from 
the existing nodes in previous list setkey(resDT2, cluster) setkey(connectedIslandsDT, cluster) print(connectedIslandsDT) print(unique(connectedIslandsDT)) print(resDT2) connectedIslandsDT<-merge(connectedIslandsDT, resDT2, by="cluster", allow.cartesian=TRUE) connectedIslandsDT$dist<-sapply(1:nrow(connectedIslandsDT),function(i) sp::spDistsN1(pts=as.matrix(connectedIslandsDT[i,2:3,with=FALSE]), pt=as.matrix(connectedIslandsDT[i,4:5,with=FALSE]),longlat=FALSE)) connectedIslandsDT[,maxdist:=max(dist), by=list(cluster)] connectedIslandsDT$keep<-sapply(1:nrow(connectedIslandsDT),function(i) if (connectedIslandsDT[i,6,with=FALSE] == connectedIslandsDT[i,7,with=FALSE]) return(TRUE) else return(FALSE)) connectedIslandsDT farthestPoints<-data.table(lon=connectedIslandsDT[keep %in% c(TRUE),]$lon, lat=connectedIslandsDT[keep %in% c(TRUE),]$lat, cluster=connectedIslandsDT[keep %in% c(TRUE),]$cluster, dist=connectedIslandsDT[keep %in% c(TRUE),]$dist) farthestPoints # if farthest nodes is not connected, create an intermediary node #TODO # compare distance of farthest nodes with distance of other node, if close to another node and connected, do not use the node sapply(1:nrow(resDT),function(i) if (resDT[i,8,with=FALSE] == resDT[i,9,with=FALSE]) return(TRUE) else return(FALSE)) resDT$dist<-as.numeric(resDT$dist) farthestPoints$keep<-sapply(1:nrow(farthestPoints),function(i) if (nrow(resDT[clusterLon %in% farthestPoints[i,1,with=FALSE]$lon & clusterLat %in% farthestPoints[i,2,with=FALSE]$lat & dist < (1.05*farthestPoints[i,4,with=FALSE]$dist),])>0) return(FALSE) else return(TRUE)) farthestPoints farthestPoints[keep %in% c(TRUE),] avgDT<-rbind(avgDT, data.table(lon=farthestPoints[keep %in% c(TRUE),]$lon, lat=farthestPoints[keep %in% c(TRUE),]$lat,circleRadius=rep(0, nrow(farthestPoints[keep %in% c(TRUE),])))) avgDT<-rbind(avgDT, data.table(lon = resDT6$lon, lat = resDT6$lat, circleRadius=rep(0, nrow(resDT6)))) avgDT # start of thinning image # # sapply(1:nrow(resDT6), function(i, resDT6) { # # connectedIslandsDT[lon %in% c(resDT6[i,]$lon) & lat %in% c(resDT6[i,]$lat),]$keep<-TRUE # # }, resDT6) # # # # thin remain of image # print("thining islands") # # input from algorithm # connectedIslandsDT # # # plot( connectedIslandsDT$lon, connectedIslandsDT$lat,col = connectedIslandsDT$cluster, pch = 20) # # displayIn3D(AFMImageNetworksAnalysis@binaryAFMImage, noLight=TRUE) # # displayIn3D(AFMImageNetworksAnalysis@binaryAFMImageWithCircles, noLight=TRUE) # # # mtx <- matrix(0, nrow=AFMImageNetworksAnalysis@binaryAFMImage@lines, # ncol=AFMImageNetworksAnalysis@binaryAFMImage@samplesperline) # mtx[connectedIslandsDT$lat+1+AFMImageNetworksAnalysis@binaryAFMImage@samplesperline*connectedIslandsDT$lon]<-1 # #mtx[islandsDT$lat+1+AFMImageNetworksAnalysis@binaryAFMImage@samplesperline*islandsDT$lon]<-1 # islandsDT # # pimage(mtx) # # # singleImageMatrix<-matrix(AFMImageNetworksAnalysis@binaryAFMImage@data$h, ncol=AFMImageNetworksAnalysis@binaryAFMImage@samplesperline) # # #Display the binary image # # pimage(singleImageMatrix) # # pimage(thinImage(singleImageMatrix)) # # # # #Thin the image using our thinning library # thin <- mtx # thin <- thinImage(mtx) # pimage(thin) # # #Display the thinned image # # pimage(thin) # # # # # keep only the points after thinning # # thinPoints<-which(thin!=0,arr.ind = T) # # connectedIslandsDT$thinPoints<-FALSE # connectedIslandsDT$keepThinPoints<-FALSE # # # apply(thinPoints, 1, function(x, connectedIslandsDT) { # connectedIslandsDT[lat %in% c(x[1]-1) & lon %in% c(x[2]-1),thinPoints:= 
TRUE] # }, connectedIslandsDT) # # # connectedIslandsDT$thinPoints # # # take a sample of all the thin point per cluster # #connectedIslandsDT[ , `:=`( COUNT = .N , IDX = 1:.N ) , by = cluster ] # #connectedIslandsDT$COUNT<-NULL # connectedIslandsDT[ , `:=`( IDX = 1:.N ) ] # # connectedIslandsDT # # listOfClusters<-unique(connectedIslandsDT$cluster) # sapply(listOfClusters, function(x, connectedIslandsDT, SAMPLE_ON_THIN_PORTIONS) { # allIndexes<-connectedIslandsDT[cluster %in% x & thinPoints == TRUE,]$IDX # keepThinIndexes<-sample(allIndexes, floor(length(allIndexes)*SAMPLE_ON_THIN_PORTIONS/100)) # connectedIslandsDT[IDX %in% keepThinIndexes,keepThinPoints:= TRUE] # },connectedIslandsDT, SAMPLE_ON_THIN_PORTIONS) # # connectedIslandsDT # # # add keepThinPoints to avgDT # # connectedIslandsDT[keepThinPoints == TRUE,] # # avgDT<-rbind(avgDT, # data.table(lon = connectedIslandsDT[keepThinPoints == TRUE,]$lon, # lat = connectedIslandsDT[keepThinPoints == TRUE,]$lat, # circleRadius = rep(0, nrow(connectedIslandsDT[keepThinPoints == TRUE,])))) } # avgDT # displayIn3D(AFMImageNetworksAnalysis@binaryAFMImage, noLight=TRUE) } AFMImageNetworksAnalysis@binaryAFMImageWithCircles<-copy(newCircleAFMImage2) avgDT$keep<-rep(TRUE, nrow(avgDT)) AFMImageNetworksAnalysis@circlesTable<-copy(unique(avgDT)) return(AFMImageNetworksAnalysis) } #' getIntersectionPointWithBorder to be described #' #' \code{getIntersectionPointWithBorder} return a data.table #' #' @param AFMImage a \code{\link{AFMImage}} from Atomic Force Microscopy #' @param center center #' @param r radius #' @param deg degree #' @author M.Beauvais #' @export getIntersectionPointWithBorder<-function(AFMImage, center, r, deg) { theta <- (deg * pi) / (180) x = center$lon + r * cos(theta) y = center$lat + r * sin(theta) pt=data.table(lat=y, lon=x) return(pt) } #' get a triangle starting from center, two segments of length r with angles deg1 and deg2 #' #' \code{getTriangle} return a data.table points of a triangle #' #' @param AFMImage a \code{\link{AFMImage}} from Atomic Force Microscopy #' @param center center #' @param r length of segment #' @param deg1 angle 1 #' @param deg2 angel 2 #' @author M.Beauvais #' @export getTriangle<-function(AFMImage, center, r, deg1, deg2) { pt1=getIntersectionPointWithBorder(AFMImage, center, r, deg1) pt2=getIntersectionPointWithBorder(AFMImage, center, r, deg2) trianglePts=data.table(lon=c(center$lon, pt1$lon, pt2$lon,center$lon), lat=c(center$lat, pt1$lat, pt2$lat,center$lat)) return(trianglePts) } #' existsSegment checks if a segment exists in an AFMImage; check if all the heights at the segment coordinates are different to zero. 
#' #' \code{existsSegment} return a boolean #' #' @param AFMImage a \code{\link{AFMImage}} from Atomic Force Microscopy or a binary \code{\link{AFMImage}} #' @param segment a data.table coming from the getBresenham2Dsegment - x and y should start from 1,1 #TODO Segment class #' @return TRUE if all the heights of the segment are different from zero #' @author M.Beauvais #' @export existsSegment<-function(AFMImage, segment) { #print(segment) res<-!any(AFMImage@data$h[segment$x+(segment$y)*AFMImage@samplesperline]==0) #print(res) return(res) } #test existsSegment(binaryAFMImage, segment= getBresenham2DSegment(10, 9,11, 9)) # existsSegment(binaryAFMImage, segment= getBresenham2DSegment(504,358,511,335)) # binaryAFMImage@samplesperline # segment= getBresenham2DSegment(504,358,511,335) # segment # binaryAFMImage@data$h[segment$y+1+segment$x*binaryAFMImage@samplesperline] # # # center$lon and center$lat #Test # library(sp) # library(data.table) # # Lines<-64 # Samplesperline<-64 # ScanSize<-128 # scanby<-ScanSize/Samplesperline # endScan<-ScanSize*(1-1/Samplesperline) # fullfilename="circlesMatrixImage" # # binaryAFMImage<-AFMImage( # data = data.table(x = rep(seq(0,endScan, by= scanby), times = Lines), # y = rep(seq(0,endScan, by= scanby), each = Samplesperline), # h = rep(1, Lines*Samplesperline*10)), # samplesperline = Samplesperline, lines = Lines, # vscansize = ScanSize, hscansize = ScanSize, scansize = ScanSize, # fullfilename = fullfilename ) # getCircleSpatialPoints(binaryAFMImage, center=data.table(lon=20, lat=15), circleRadius=0) #getCircleSpatialPoints(binaryAFMImage, center= data.table(lon=20, lat=10), circleRadius=5) #getCircleSpatialPoints(binaryAFMImage, center= data.table(lon=20, lat=10), circleRadius=0) #getCircleSpatialPoints(binaryAFMImage, center= data.table(lon=20, lat=10), circleRadius=1) #center= data.table(lon=20, lat=10) #' get the spatial points on the circle including the center of the circle #' #' @param binaryAFMImage a binary \code{\link{AFMImage}} from Atomic Force Microscopy #' @param center the center of the circle with center$lon as the x coordinates and center$lat as the y coordinates #' @param circleRadius the radius of the circle #' @return a \code{\link{SpatialPoints}} object of all the points of the circle including the center of the circle #' @export #' @author M.Beauvais getCircleSpatialPoints<-function(binaryAFMImage, center, circleRadius) { if (circleRadius<0) { stop("getCircleSpatialPoints - the radius is inferior to 0") return() } if (circleRadius>0) { blockSize<-circleRadius*2+1 pts = SpatialPoints(cbind(rep(1:blockSize,blockSize)+center$lon-circleRadius-1, rep(1:blockSize,1,each= blockSize)+center$lat-circleRadius-1)) #print(pts) pts<-pts[pts$coords.x1>0&pts$coords.x1<binaryAFMImage@lines&pts$coords.x2>0&pts$coords.x2<binaryAFMImage@samplesperline] #plot(pts) nm <- sp::spDistsN1(pts=matrix(c(pts$coords.x1, pts$coords.x2), ncol=2), pt=c(center$lon, center$lat), longlat=FALSE) #print(nm) #centerAllpoints<-pts[nm==circleRadius] centerAllpoints<-pts[nm<=circleRadius] uniqueX2<-unique(centerAllpoints$coords.x2) #uniqueX2 res<-lapply(1:length(uniqueX2),function(i,centerAllpoints, uniqueX2) { allX1<-centerAllpoints[centerAllpoints$coords.x2 == uniqueX2[i]]$coords.x1 #print(allX1) min<-min(allX1) max<-max(allX1) if (min!=max) { x1<-c(min, max) return(data.table(x1, x2=rep(uniqueX2[i], 2))) }else{ return(data.table(x1=min, x2=uniqueX2[i])) } },centerAllpoints, uniqueX2) resDT<-rbindlist(res) #resDT<-rbind(resDT, data.table(x1=center$lon,x2=center$lat) 
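    # keep only the outline of the disc (the min and max x1 of each x2 row) plus the center itself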
centerAllpoints<-SpatialPoints(cbind( c(resDT$x1, center$lon), c(resDT$x2, center$lat) )) #plot(centerAllpoints) }else{ # circleRadius == 0 centerAllpoints<-SpatialPoints(cbind(center$lon, center$lat)) } #print("ok") return(centerAllpoints) } # test # Test #AreNodesConnected(binaryAFMImage, data.table(lon=226, lat=344), 10, data.table(lon=25, lat=344), 5) #AreNodesConnected(binaryAFMImage, data.table(lon=226, lat=344), 10, data.table(lon=25, lat=344), 0) #AreNodesConnected(binaryAFMImage, center1, circleRadius1, data.table(lon=pt$coords.x1, lat=pt$coords.x2), pt$circleRadius) # AreNodesConnected(binaryAFMImage, data.table(lon=76, lat=60), 1, data.table(lon=79, lat=65), 0) # AreNodesConnected(binaryAFMImage, data.table(lon=76, lat=60), 0, data.table(lon=79, lat=65), 0) # # circle1AllPoints<-getCircleSpatialPoints(binaryAFMImage, data.table(lon=76, lat=60), 1) # circle1AllPoints<-circle1AllPoints[which(binaryAFMImage@data$h[circle1AllPoints$coords.x2+1+circle1AllPoints$coords.x1*binaryAFMImage@samplesperline]!=0)] # circle1AllPoints # # existsSegment(binaryAFMImage, segment= getBresenham2DSegment(76,60,79,65)) # binaryAFMImage@data$h[segment$y+1+segment$x*binaryAFMImage@samplesperline] # which(binaryAFMImage@data$h[segment$y+1+segment$x*AFMImage@samplesperline]!=0) #' check if nodes represented by circles are connected. The function defines all the possible segments between the circles and check if at least one segment exists. #' #' @param binaryAFMImage a binary \code{\link{AFMImage}} from Atomic Force Microscopy #' @param center1 the center of the circle with center$lon as the x coordinates and center$lat as the y coordinates #' @param radius1 the radius of the circle #' @param center2 the center of the circle with center$lon as the x coordinates and center$lat as the y coordinates #' @param radius2 the radius of the circle #' @return TRUE if the nodes are connected #' @export #' @author M.Beauvais AreNodesConnected<-function(binaryAFMImage, center1, radius1, center2, radius2) { # print(center1) # print(radius1) # print(center2) # print(radius2) if (radius1>0) { circle1AllPoints<-getCircleSpatialPoints(binaryAFMImage, center1, radius1) }else{ circle1AllPoints<-getCircleSpatialPoints(binaryAFMImage, center1, 1) circle1AllPoints<-circle1AllPoints[which(binaryAFMImage@data$h[circle1AllPoints$coords.x2+1+circle1AllPoints$coords.x1*binaryAFMImage@samplesperline]!=0)] } # print(circle1AllPoints) # print(length(circle1AllPoints)) #plot(circle1AllPoints) if (radius2>0) circle2AllPoints<-getCircleSpatialPoints(binaryAFMImage, center2, radius2) else{ circle2AllPoints<-getCircleSpatialPoints(binaryAFMImage, center2, 1) circle2AllPoints<-circle2AllPoints[which(binaryAFMImage@data$h[circle2AllPoints$coords.x2+1+circle2AllPoints$coords.x1*binaryAFMImage@samplesperline]!=0)] } #print(circle2AllPoints) #print(length(circle2AllPoints)) #points(circle2AllPoints) if ((length(circle1AllPoints))&(length(circle2AllPoints)>0)) { for (circlePt1Nb in seq(1, length(circle1AllPoints))) { circlePt1<-circle1AllPoints[circlePt1Nb,] for (circlePt2Nb in seq(1, length(circle2AllPoints))) { circlePt2<-circle2AllPoints[circlePt2Nb,] segment<-getBresenham2DSegment(circlePt1$coords.x1, circlePt1$coords.x2, circlePt2$coords.x1, circlePt2$coords.x2) if (existsSegment(binaryAFMImage, segment)) { print(paste("segment exists",center1$lon, center1$lat,":",center2$lon, center2$lat)) return(TRUE) }else{ #print("FALSE") } } } } return(FALSE) } # binaryAFMImage<-AFMImageNetworksAnalysis@binaryAFMImage # center1 # radius1<-circleRadius1 
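# A minimal usage sketch (hypothetical values, assuming a binary AFMImage named
# binaryAFMImage as in the commented tests above): two nodes are connected when at
# least one Bresenham segment joining their circles stays on non-zero pixels.
# AreNodesConnected(binaryAFMImage,
#                   center1=data.table(lon=10, lat=10), radius1=2,
#                   center2=data.table(lon=30, lat=10), radius2=2)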
# center2<-data.table(lon=pt$lon, lat=pt$lat) # radius2<-pt$circleRadius # #binaryAFMImage@data$h[segment$x+(segment$y)*binaryAFMImage@samplesperline] # binaryAFMImage@data$h[segment$y-1+(segment$x-1)*binaryAFMImage@samplesperline] # binaryAFMImage@data$h[segment$x-1+(segment$y-1)*binaryAFMImage@samplesperline] # displayIn3D(binaryAFMImage, noLight=TRUE) # # segment[,1]-1 # #' calculate the angle between two vectors #' #' @param x a vector #' @param y a vector #' @return the angle between the vectors #' @export #' @author M.Beauvais getAngle <- function(x,y){ dot.prod <- x%*%y norm.x <- norm(x,type="2") norm.y <- norm(y,type="2") # print(dot.prod) # print(norm.x) # print(norm.y) theta <- acos(dot.prod / (norm.x * norm.y)) if (is.nan(theta)) theta=0 return(as.numeric(theta)) } # test # getAngle(c(2,12), c(1,6)) # getAngle(c(2,12), c(4,24)) #' check if all the angles between one edge and a list of edges is superior to a specified value. #' #' @param binaryAFMImage a binary \code{\link{AFMImage}} from Atomic Force Microscopy #' @param edge1 one edge #' @param edges2 list of edges #' @param minAngle the minimum angle value #' @return TRUE if all the angle are superior to the specified value #' @export #' @author M.Beauvais isAngleBetweenEdgesAlwaysSuperiorToMinAngle<-function(binaryAFMImage, edge1, edges2, minAngle) { #print(edge1) #print(edges2) coordsFromEdge1=getCoordinatesFromVertexId(as.numeric(edge1$from)) coordsToEdge1=getCoordinatesFromVertexId(as.numeric(edge1$to)) coordsFromEdges2=getCoordinatesFromVertexId(as.numeric(edges2$from)) coordsToEdges2=getCoordinatesFromVertexId(as.numeric(edges2$to)) # allYCoordinates<-cbind(coordsFromEdges2, coordsToEdges2) # print(allYCoordinates) x=c(coordsToEdge1$coords.x1-coordsFromEdge1$coords.x1, coordsToEdge1$coords.x2-coordsFromEdge1$coords.x2) for (y in seq(1,nrow(coordsToEdges2))){ y=c(coordsToEdges2[y,]$coords.x1-coordsFromEdges2[y,]$coords.x1, coordsToEdges2[y,]$coords.x2-coordsFromEdges2[y,]$coords.x2) angle<-getAngle(x,y) if (angle>pi) angle<-angle-pi # print(angle) #print(paste("x=",x,"y=",y,"angle=",180*angle/pi, "degrees")) if (angle<minAngle) { #print(paste(c(x,"->",y))) return(FALSE) } } return(TRUE) } # isAngleBetweenEdgesAlwaysSuperiorToMinAngle(edge1=data.table(from=vid1, to=vid2), edges2=existingEdgesVid1,0.52) # # existingEdges<-data.table(from = c("6553685"), to = c("2097229"),arrows = c("to")) # isAngleBetweenEdgesAlwaysSuperiorToMinAngle(edge1=data.table(from=1835079, to=6553685), edges2=existingEdges,0.52) # isAngleBetweenEdgesAlwaysSuperiorToMinAngle(edge1=data.table(from=6553685, to=1835079), edges2=existingEdges,0.52) #' display the network of nodes and edges #' #' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy #' @param edges list of edges #' @param isolates list of isolated edges #' @export #' @author M.Beauvais displaygridIgraphPlotFromEdges<-function(AFMImage, edges, isolates) { #print(edges) alledges2<-as.vector(t(matrix(c(edges$from,edges$to),ncol=2))) vnodes<-unique(c(edges$from, edges$to)) vnodes<-data.frame(id=vnodes, label = vnodes) # coords=getCoordinatesFromVertexId(as.numeric(levels(vnodes$id))[vnodes$id]) # coords # coords[order(coords.x1),] g<-graph(edges= alledges2,isolates=isolates, directed=FALSE) gridIgraphPlot(AFMImage, g) } #' display the network of nodes and edges #' #' @param AFMImageNetworksAnalysis an \code{\link{AFMImageNetworksAnalysis}} #' @export #' @author M.Beauvais displaygridIgraphPlot<-function(AFMImageNetworksAnalysis) { keep<-NULL 
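  # keep is bound to NULL only to silence the R CMD check NOTE about the
  # non-standard evaluation used in the data.table filter below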
displaygridIgraphPlotFromEdges(AFMImageNetworksAnalysis@binaryAFMImage, AFMImageNetworksAnalysis@edgesTable[keep %in% c(TRUE),], AFMImageNetworksAnalysis@isolatedNodesList) } #' displayColoredNetworkWithVerticesSize #' #' display network #' #' @param AFMImageNetworksAnalysis a \code{\link{AFMImageNetworksAnalysis}} #' @param fullfilename a directory plus filename for export #' @export #' @author M.Beauvais displayColoredNetworkWithVerticesSize<-function(AFMImageNetworksAnalysis, fullfilename) { vid<-node_degree<-vidorder<-edgeWeigth<-NULL if(missing(fullfilename)){ save <- FALSE }else{ save <- TRUE } AFMImage<-AFMImageNetworksAnalysis@binaryAFMImage g<-AFMImageNetworksAnalysis@skeletonGraph # good vizualisation to be kept !!!! verticesAnalysisDT<-data.table(vid=V(g)$name, node_degree=unname(degree(g))) # verticesAnalysisDT$coords.x1<-getCoordinatesFromVertexId(AFMImage,verticesAnalysisDT$vid)$coords.x1 # verticesAnalysisDT$coords.x2<-getCoordinatesFromVertexId(AFMImage,verticesAnalysisDT$vid)$coords.x2 verticesAnalysisDT$coords.x1<-getCoordinatesFromVertexId(verticesAnalysisDT$vid)$coords.x1 verticesAnalysisDT$coords.x2<-getCoordinatesFromVertexId(verticesAnalysisDT$vid)$coords.x2 verticesAnalysisDT$vid<-as.numeric(verticesAnalysisDT$vid) setkey(verticesAnalysisDT, vid) cDT<-AFMImageNetworksAnalysis@circlesTable cDT$vid<-getVertexId(AFMImage,cDT$lon,cDT$lat) setkey(cDT, vid) circlesDT<-merge(verticesAnalysisDT, cDT, all = TRUE) circlesDT setkeyv(circlesDT, "vid") listOfVerticesDT<-data.table(vid=as.numeric(V(g)$name), vidorder=seq(1, length(V(g)$name))) setkeyv(listOfVerticesDT, "vid") finalDT<-merge(listOfVerticesDT,circlesDT) finalDT$color<-"black" finalDT[node_degree==0,]$color<-rep("black", nrow(finalDT[node_degree==0,])) finalDT[node_degree==1,]$color<-rep("blue", nrow(finalDT[node_degree==1,])) finalDT[node_degree>2,]$color<-rep("red", nrow(finalDT[node_degree>2,])) finalDT[node_degree==2,]$color<-rep("grey", nrow(finalDT[node_degree==2,])) vertexcolor<-finalDT[order(vidorder),]$color # define the layout matrix coordinatesVector<-getNetworkGridLayout(AFMImage, V(g)$name) #coordinatesVector l<-matrix(coordinatesVector$y ,byrow = TRUE) l<-cbind(l, coordinatesVector$x) # plot.igraph(g, layout=l, # vertex.shape="circle", vertex.size=2, vertex.label=NA, vertex.color="red", vertex.frame.color="red", # edge.color="grey" # ) # plot.igraph(g, layout=l, # vertex.shape="circle", vertex.size=2, vertex.label=NA, vertex.color=vertexcolor, vertex.frame.color=vertexcolor, # edge.color="grey" # ) vertexsize<-finalDT[order(vidorder),]$circleRadius print(vertexsize) # plot.igraph(g, layout=l, # vertex.shape="circle", vertex.size=vertexsize, vertex.label=NA, vertex.color=vertexcolor, vertex.frame.color=vertexcolor, # edge.color="grey" # ) # calculate edge weigth # mean of vertices size rm(edgeDT) edgeDT<-copy(as.data.table(get.edgelist(g))) colnames(edgeDT)<-c("vid","to") edgeDT$vid<-as.character(edgeDT$vid) edgeDT finalDT2<-data.table(vid=as.character(finalDT$vid), from_node_degree=finalDT$node_degree, from_circleRadius= finalDT$circleRadius) setkeyv(edgeDT, "vid") setkeyv(finalDT2, "vid") edgeDT<-merge(edgeDT,finalDT2, all.x=TRUE) colnames(edgeDT)<-c("from","vid","from_node_degree","from_circleRadius") edgeDT$vid<-as.character(edgeDT$vid) edgeDT finalDT2<-data.table(vid=as.character(finalDT$vid), to_node_degree=finalDT$node_degree, to_circleRadius= finalDT$circleRadius) setkeyv(edgeDT, "vid") setkeyv(finalDT2, "vid") edgeDT<-merge(edgeDT,finalDT2, all.x=TRUE) # edge weigth calculation 
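  # the weight of an edge is the mean of the circle radii of its two endpoint nodes;
  # it is used below as the edge width when plotting the graph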
edgeDT$edgeWeigth<-NULL #edgeDT[, nb:=.I,] edgeDT[,edgeWeigth:=(edgeDT$from_circleRadius+edgeDT$to_circleRadius)/2,by=.I] edgeDT #E(g)$weight <- edgeDT$edgeWeigth*50 if (save) png(filename = fullfilename,width = 1024, height = 1024, units = "px") plot.igraph(g, layout=l, vertex.shape="circle", vertex.size=vertexsize, vertex.label=NA, vertex.color=vertexcolor, vertex.frame.color=vertexcolor, edge.width= edgeDT$edgeWeigth, edge.color="grey" ) if (save) dev.off() } #' identifyNodesAndEdges #' #' find nodes and edges #' #' @param ... cl: a cluster object from the parallel package #' @param AFMImageNetworksAnalysis a \code{\link{AFMImageNetworksAnalysis}} #' @param maxHeight a double for filtering the heights - upper to this height the heights are set to zero #' @return AFMImageNetworksAnalysis a \code{\link{AFMImageNetworksAnalysis}} #' #' @export #' @author M.Beauvais identifyNodesAndEdges<-function(..., AFMImageNetworksAnalysis,maxHeight){ print(paste("identifyNodesAndEdges")) force(AFMImageNetworksAnalysis) filename<-lon<-lat<-minDistance<-from_cluster<-to_cluster<-total<-vid<-NULL meanLon<-meanLat<-NULL args<-names(list(...)) print(args) if (is.null(args)) { clExist<-FALSE }else{ clExist<-c(match('cl',args)!=-1) print(paste("clExist= ",clExist)) } if (clExist) { print("using parallel") requireNamespace("parallel") cl<-cl }else{ print("Not using parallel") } binaryAFMImage<-copy(AFMImageNetworksAnalysis@binaryAFMImage) #displayIn3D(binaryAFMImage, noLight=TRUE) newCircleAFMImage<-copy(AFMImageNetworksAnalysis@binaryAFMImage) newCircleAFMImage2<-copy(AFMImageNetworksAnalysis@binaryAFMImage) avgDT<-data.table(cluster=c(" "),lon = c(0), lat = c(0), circleRadius= c(0), keep=c(FALSE), vid=c(0)) cluster<-node<-mindist<-maxdist<-keep<-NULL nbOfCircles<-maxArea<-h<-NULL clusterLon<-clusterLat<-cluster<-IDX<-keepThinPoints<-meandist<-NULL circlesMatrixFilename<-paste0(filename, "-circlesMatrix.RData") if (clExist) { circlesMatrix<-getMaxCircleMatrix(cl=cl, newCircleAFMImage = newCircleAFMImage,CIRCLE_RADIUS_INIT=CIRCLE_RADIUS_INIT) }else{ circlesMatrix<-getMaxCircleMatrix(newCircleAFMImage = newCircleAFMImage,CIRCLE_RADIUS_INIT=CIRCLE_RADIUS_INIT) } # save(circlesMatrix,file= paste0(dirOutput,circlesMatrixFilename)) #load(file= paste0(dirOutput,circlesMatrixFilename)) #circlesMatrixAFMImage<-getAFMImageFromMatrix(binaryAFMImage, circlesMatrix) #displayIn3D(circlesMatrixAFMImage, noLight = TRUE) circlesMatrixAFMImage<-getAFMImageFromMatrix(binaryAFMImage, circlesMatrix) #displayIn3D(circlesMatrixAFMImage, noLight = TRUE) listOfFilters<-sort(unique(circlesMatrixAFMImage@data$h), decreasing = TRUE) listOfFilters filterIndex<-0 while(filterIndex<length(listOfFilters)){ #while(filterIndex<3){ # using the radius map and filter it filterIndex<-filterIndex+1 maxFilter = listOfFilters[filterIndex] maxFilter print(maxFilter) if (maxFilter > 0) { if (filterIndex != 1) max<-listOfFilters[filterIndex-1] else max<-maxHeight max AFMImageNetworksAnalysis = new("AFMImageNetworksAnalysis") AFMImageNetworksAnalysis@heightNetworksslider=1 AFMImageNetworksAnalysis@filterNetworkssliderMin=maxFilter AFMImageNetworksAnalysis@filterNetworkssliderMax<-max AFMImageNetworksAnalysis@smallBranchesTreatment=FALSE AFMImageNetworksAnalysis<-transformAFMImageForNetworkAnalysis(AFMImageNetworksAnalysis, copy(circlesMatrixAFMImage)) newAFMImage<-AFMImageNetworksAnalysis@binaryAFMImage #displayIn3D(binaryAFMImage, noLight=TRUE) #displayIn3D(circlesMatrixAFMImage, noLight=TRUE) #displayIn3D(newAFMImage, noLight=TRUE) # if points were 
removed in a previous step, do not take them into account newAFMImage@data$h[newCircleAFMImage@data$h == 0]<-0 #displayIn3D(newAFMImage, noLight=TRUE) #displayIn3D(newCircleAFMImage2, noLight=TRUE) print("ok") #dbscan # withtreatedPoints<-newAFMImage@data # allPointsislandsDT<-cbind(lon=1+withtreatedPoints$y*newAFMImage@lines/newAFMImage@vscansize, lat=1+withtreatedPoints$x*newAFMImage@samplesperline/newAFMImage@hscansize) untreatedPoints<-newAFMImage@data[h!=0] untreatedPoints if (nrow(untreatedPoints)>0) { # lon and lat both start from 1 rm(islandsDT) islandsDT<-cbind(lat=1+untreatedPoints$y*newAFMImage@lines/newAFMImage@vscansize, lon=1+untreatedPoints$x*newAFMImage@samplesperline/newAFMImage@hscansize) islandsDT islandsDT<-data.table(lat=islandsDT[,1], lon=islandsDT[,2]) # remove points that are near the border lon_border_width<-floor(newAFMImage@samplesperline*5/100) lon_border_width lat_border_width<-floor(newAFMImage@lines*5/100) lat_border_width islandsDT<-islandsDT[islandsDT$lat>lat_border_width & islandsDT$lat<(newAFMImage@lines-lat_border_width)& islandsDT$lon>lon_border_width & islandsDT$lon<(newAFMImage@samplesperline-lon_border_width)] islandsDT # if (nrow(untreatedPoints)==1) islandsDT<-as.matrix(islandsDT[islandsDT[,1]>lat_border_width & islandsDT[,1]<(newAFMImage@lines-lat_border_width)& # islandsDT[,2]>lon_border_width & islandsDT[,2]<(newAFMImage@samplesperline-lon_border_width)], ncol=2, byrow = FALSE) # islandsDT #if (!all(dim(islandsDT)==0)&nrow(islandsDT)>0) { if (nrow(islandsDT)>0) { # checks (very important) # circlesMatrix[islandsDT$lon,islandsDT$lat] # circlesMatrixAFMImage@data$h[((islandsDT$lat)-1)*circlesMatrixAFMImage@samplesperline + islandsDT$lon] DBSCAN <- dbscan::dbscan(islandsDT, eps = 1.5, MinPts = 1, borderPoints=FALSE) #plot(untreatedPoints$y, untreatedPoints$x, col = DBSCAN$cluster, pch = 20) #plot(islandsDT, col = DBSCAN$cluster, pch = 20) islandsDT<-data.table(islandsDT,cluster=DBSCAN$cluster) setkeyv(islandsDT, "cluster") isolatedIslandsDT<-islandsDT[cluster==0,] isolatedIslandsDT islandsDT<-islandsDT[cluster!=0,] #print(islandsDT) plot( islandsDT$lat, islandsDT$lon, col = islandsDT$cluster, pch = 20, xlim = c(0, newAFMImage@samplesperline), ylim=c(0,newAFMImage@lines)) print("start spliting segment regularly...") clusterChar = data.table ( cluster = unique(islandsDT$cluster), minLon = islandsDT[, min(lon), by=cluster]$V1, maxLon = islandsDT[, max(lon), by=cluster]$V1, minLat = islandsDT[, min(lat), by=cluster]$V1, maxLat = islandsDT[, max(lat), by=cluster]$V1) clusterChar$area<-(clusterChar$maxLat-clusterChar$minLat)*(clusterChar$maxLon-clusterChar$minLon) clusterChar$count<-islandsDT[,.N, by=cluster]$N clusterChar$shape<-sapply(1:nrow(clusterChar),function(i) { if ((clusterChar[i,]$maxLon-clusterChar[i,]$minLon)>(clusterChar[i,]$maxLat-clusterChar[i,]$minLat)) { return("width") }else{ return("height") } }) print(clusterChar) print(maxFilter) rm(resDT6) if (maxFilter==1){ print("STOOP") } #i<-2 #i<-4 print(paste("calculation of primary nodes for maxFilter=", maxFilter)) resDT6<-lapply(1:nrow(clusterChar),function(i,islandsDT, clusterChar, maxFilter,AREA_MIN,CLUSTER_COUNT_MIN) { meanLon<-meanLat<-NULL # print("i") print(paste("cluster",i)) totalHeight<-clusterChar[i,]$maxLat-clusterChar[i,]$minLat #print(totalHeight) totalWidth<-clusterChar[i,]$maxLon-clusterChar[i,]$minLon #print(totalWidth) clusterN<-clusterChar[i,]$cluster #print(clusterN) # if the cluster is small compared to maxFilter, take the barycenter of the cluster 
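          # small clusters get a single node at their barycenter; elongated clusters are cut
          # into regularly spaced nodes along their major axis (else branch below)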
if(((clusterChar[i,]$shape == "height")&totalHeight<maxFilter)| ((clusterChar[i,]$shape == "width")&totalWidth<maxFilter)) { # taking the barycenter clusterN<-clusterChar[i,]$cluster resDT2<-islandsDT[cluster %in% clusterN,] tempResDT2<-copy(resDT2) tempResDT2[, meanLon:=mean(lon), by=cluster] tempResDT2[, meanLat:=mean(lat), by=cluster] tempResDT2[, dist:=sqrt((lon-meanLon)^2+(lat-meanLat)^2), by=cluster] tempResDT2[, minDistance:=min(dist), by=cluster] tempResDT2[dist == minDistance, c("lon","lat","cluster"),] tempResDT2<-unique(tempResDT2[dist == minDistance, c("lon","lat","cluster"),],by="cluster",fromLast=TRUE) # print("tempResDT2") # print(tempResDT2) # circleRadius<-circlesMatrixAFMImage@data[circlesMatrixAFMImage@data$y %in% (circlesMatrixAFMImage@vscansize*(tempResDT2$lat-1)/circlesMatrixAFMImage@samplesperline) & # circlesMatrixAFMImage@data$x %in% (circlesMatrixAFMImage@hscansize*(tempResDT2$lon-1)/circlesMatrixAFMImage@lines)]$h # print(circleRadius) circleRadius<-rep(c(maxFilter), times=as.integer(nrow(tempResDT2))) return(data.table(cluster=clusterN, lon= tempResDT2$lon, lat= tempResDT2$lat, circleRadius= circleRadius)) }else { minLat<-clusterChar[i,]$minLat maxLat<-clusterChar[i,]$maxLat minLon<-clusterChar[i,]$minLon maxLon<-clusterChar[i,]$maxLon theta<-atan2((maxLon-minLon),(maxLat-minLat)) hypothenuse<-maxFilter # regularly spaced points depending on circleRadius if (clusterChar[i,]$shape == "height") { # number if (cos(theta)!=0) { regularSpace<-hypothenuse/cos(theta) }else{ regularSpace<-hypothenuse/sin(theta) } numberOfIntermediaryPoints<-floor(totalHeight/(regularSpace*2)-1) if (numberOfIntermediaryPoints<=0) { numberOfIntermediaryPoints<-0 vectorOfLat<-c(clusterChar[i,]$minLat, clusterChar[i,]$maxLat) vectorOfLat }else{ #regularSpace<-floor(totalHeight/(numberOfIntermediaryPoints+1)) vectorOfLat<-seq(from = (minLat+regularSpace), to = (minLat+totalHeight-regularSpace) , by=regularSpace*2) vectorOfLat<-round(vectorOfLat,0) vectorOfLat } # use the medium horizontal position for each vectorOfLat resDT5<-lapply(1:length(vectorOfLat),function(j, islandsDT, vectorOfLon, clusterN) { resDT2<-islandsDT[lat %in% vectorOfLat[j] & cluster %in% clusterN,] #print(resDT2) avgDTLon<-floor(mean(resDT2$lon)) # print("vectorOfLat") # circleRadius<-circlesMatrixAFMImage@data[circlesMatrixAFMImage@data$y %in% (circlesMatrixAFMImage@vscansize*(vectorOfLat[j]-1)/circlesMatrixAFMImage@samplesperline) & # circlesMatrixAFMImage@data$x %in% (circlesMatrixAFMImage@hscansize*(avgDTLon-1)/circlesMatrixAFMImage@lines)]$h # print(circleRadius) circleRadius<-rep(maxFilter, times=as.integer(length(avgDTLon))) return(data.table(cluster=clusterN, lon = avgDTLon, lat= vectorOfLat[j], circleRadius= circleRadius)) },islandsDT, vectorOfLat, clusterChar[i,]$cluster) return(rbindlist(resDT5)) }else{ if (cos(theta)!=0) { regularSpace<-hypothenuse/sin(theta) }else{ regularSpace<-hypothenuse/cos(theta) } numberOfIntermediaryPoints<-floor(totalWidth/(regularSpace*2)-1) if (numberOfIntermediaryPoints<=0) { numberOfIntermediaryPoints<-0 vectorOfLon<-c(minLon, maxLon) vectorOfLon }else{ #regularSpace<-floor(totalWidth/(numberOfIntermediaryPoints+1)) vectorOfLon<-seq(from = (minLon+regularSpace), to = (maxLon-regularSpace) , by=regularSpace*2) #vectorOfLon<-c(minLon,vectorOfLon, maxLon) vectorOfLon<-round(vectorOfLon,0) vectorOfLon } #j=2 resDT5<-lapply(1:length(vectorOfLon),function(j, islandsDT, vectorOfLon, clusterN) { resDT2<-islandsDT[lon %in% vectorOfLon[j] & cluster %in% clusterN,] 
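            # the node sits at the sampled longitude, at the mean latitude of the cluster
            # points found in that column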
avgDTLat<-floor(mean(resDT2$lat)) # print("vectorOfLon") # circleRadius<-circlesMatrixAFMImage@data[circlesMatrixAFMImage@data$y %in% (circlesMatrixAFMImage@vscansize*(avgDTLat-1)/circlesMatrixAFMImage@samplesperline) & # circlesMatrixAFMImage@data$x %in% (circlesMatrixAFMImage@hscansize*(vectorOfLon[j]-1)/circlesMatrixAFMImage@lines),]$h # print(circleRadius) circleRadius<-rep(maxFilter, times=as.integer(length(avgDTLat))) return(data.table(cluster=clusterN, lon = vectorOfLon[j], lat= avgDTLat, circleRadius= circleRadius)) },islandsDT, vectorOfLon, clusterChar[i,]$cluster) return(rbindlist(resDT5)) } } }, islandsDT, clusterChar,maxFilter,AREA_MIN,CLUSTER_COUNT_MIN) if (maxFilter==1){ print("STOOP") } resDT6<-rbindlist(resDT6) resDT6<-unique(resDT6) resDT6<-resDT6[complete.cases(resDT6),] resDT6<-resDT6[lon != 0 & lon != (AFMImageNetworksAnalysis@binaryAFMImage@samplesperline-1) & lat!=0 & lat!=(AFMImageNetworksAnalysis@binaryAFMImage@lines-1) ,] print(resDT6) resDT6<-resDT6[circleRadius!=0,] if (nrow(resDT6)==0) break; print("ok") avgDT6<-data.table(cluster= paste0(maxFilter,"-",resDT6$cluster) , lon = resDT6$lon, lat = resDT6$lat, circleRadius=as.vector(circlesMatrix[cbind(resDT6$lon,resDT6$lat)]), keep=rep(TRUE, nrow(resDT6)), vid= getVertexId(AFMImageNetworksAnalysis@binaryAFMImage,resDT6$lon, resDT6$lat)) #print(avgDT6) avgDT6$circleRadius avgDT<-rbind(avgDT, avgDT6) avgDT from<-to<-NULL alledges<-c() allvertices<-c() # bag of each envelope # no need because of !exists("") #pointsInsideEnvelopesToBeRemovedDT=data.table(vid=c(0),lon=c(0),lat=c(0)) # print("for envelope") for(clusterName in unique(avgDT6$cluster)) { # TODO if (nrow(avgDT6[cluster %in% clusterName,c("lon","lat"),])>1){ centers<-as.matrix(avgDT6[cluster %in% clusterName & circleRadius != 0,c("lon","lat"),]) centers colnames(centers) <- NULL r<-unlist(unname(c(avgDT6[cluster %in% clusterName & circleRadius != 0,c("circleRadius"),]))) tryCatch({ # library(PlotRegionHighlighter) # library(sp) print(paste("calculate enveloppe of cluster of points ", clusterName,"with RADIUS_MULTIPLIER=",RADIUS_MULTIPLIER)) envelope <- generatePolygonEnvelope(AFMImageNetworksAnalysis, centers, r*RADIUS_MULTIPLIER) pointsInsideEnvelopeDT<-getAllPointsToRemove(AFMImageNetworksAnalysis, envelope) colnames(pointsInsideEnvelopeDT)<-c("vid","lat","lon") pointsInsideEnvelopeDT # add enveloppe to be removed # print("*** Removing cluster of nodes") # print(centers) # print(r*RADIUS_MULTIPLIER) if (!exists("pointsInsideEnvelopesToBeRemovedDT")) pointsInsideEnvelopesToBeRemovedDT<- pointsInsideEnvelopeDT else pointsInsideEnvelopesToBeRemovedDT<-rbind(pointsInsideEnvelopesToBeRemovedDT, pointsInsideEnvelopeDT) }, error = function(e) { print("extra nodes error 2") print(e) warning(paste("extra nodes error",e)) }, finally = { #print("extra nodes identified") }) } # iterate on all possible edges in order to remove edge between nodes identifyLinksBetweenNodesToCreateNodes<-function(k,AFMImageNetworksAnalysis, newCircleAFMImage, MAX_DISTANCE,allPossibleEdges) { requireNamespace("data.table") requireNamespace("sp") requireNamespace("parallel") requireNamespace("AFM") #TODO #print(k) # TODO if kept remove parameter binaryAFMImage<-AFMImageNetworksAnalysis@binaryAFMImage binaryAFMImage<-newCircleAFMImage currentEdge<-allPossibleEdges[,k] centerId<-currentEdge[1] otherCenterId<-currentEdge[2] circlesTable<-AFMImageNetworksAnalysis@circlesTable[keep %in% c(TRUE),] vedges<-data.table(from = c(""), to = c(""),arrows = c("to")) center1= circlesTable[centerId,] 
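      # first endpoint of the candidate edge: the center and radius of the circle at centerId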
circleRadius1=circlesTable[centerId,]$circleRadius vid1<-getVertexId(AFMImageNetworksAnalysis@binaryAFMImage,center1$lon, center1$lat) otherNodes<-circlesTable[otherCenterId,] #otherNodes<-allNodesAsSpatialPoints[-centerId,] otherNodes$dist<-sp::spDistsN1(pts=matrix(c(otherNodes$lon, otherNodes$lat), ncol=2), pt=c(center1$lon, center1$lat), longlat=FALSE) otherNodes<-otherNodes[with(otherNodes, order(otherNodes$dist)), ] otherNodes<-otherNodes[otherNodes$dist<MAX_DISTANCE,] #print(otherNodes) if (nrow(otherNodes)!=0){ #centerId2Nb<-1 #centerId2Nb<-10 pt<-otherNodes[1,] vid2<-getVertexId(AFMImageNetworksAnalysis@binaryAFMImage,pt$lon, pt$lat) if (AreNodesConnected(binaryAFMImage, center1, circleRadius1, data.table(lon=pt$lon, lat=pt$lat), pt$circleRadius)) { vedges<-rbind(vedges, data.table(from = vid1, to = vid2,arrows = c("to"))) print(paste(vid1,vid2, " edge found")) }else{ #print("edge not found") } }else{ #print("node too far") } return(vedges[-1,]) } edgesProcessed<-c() avgDT6$circleRadius=avgDT6$circleRadius avgDT6$keep<-rep(TRUE, nrow(avgDT6)) avgDT6$vid<-as.character(getVertexId(AFMImageNetworksAnalysis@binaryAFMImage,avgDT6$lon, avgDT6$lat)) avgDT6 avgDT$keep<-rep(TRUE, nrow(avgDT)) avgDT$vid<-as.character(getVertexId(AFMImageNetworksAnalysis@binaryAFMImage,avgDT$lon, avgDT$lat)) avgDT circlesTable<-avgDT[-1,] circlesTable$vid<-as.character(getVertexId(AFMImageNetworksAnalysis@binaryAFMImage,circlesTable$lon, circlesTable$lat)) circlesTable AFMImageNetworksAnalysis@circlesTable<-copy(circlesTable) #AFMImageNetworksAnalysis@binaryAFMImage<-binaryAFMImage # # now identify extra nodes based on links between nodes # print(paste("number of circles=", nrow(circlesTable))) if (is.list(circlesTable) & nrow(circlesTable) > 1) { print(paste(nrow(avgDT6),nrow(circlesTable))) allPossibleEdges<-combn(seq(nrow(circlesTable)-nrow(avgDT6)+1,nrow(circlesTable)), 2, simplify = TRUE) if (nrow(circlesTable)!=nrow(avgDT6)){ allPossibleEdges<-cbind(allPossibleEdges, rbind(rep(seq(nrow(circlesTable)-nrow(avgDT6)+1,nrow(circlesTable)),each=nrow(circlesTable)-nrow(avgDT6)), rep(seq(1,nrow(circlesTable)-nrow(avgDT6)),nrow(avgDT6), by=nrow(avgDT6))) ) } # egdes between new nodes / new nodes and new nodes / old nodes allPossibleEdges print(paste("number of possible edges=", ncol(allPossibleEdges))) print("identifyLinksBetweenNodesToCreateNodes") start.time <- Sys.time() print(start.time) if(clExist) { #cl<-cl print("************************************ using parallel") parallel::clusterEvalQ(cl , c(library("data.table"),library("sp"), library("AFM"),library("parallel"))) parallel::clusterExport(cl, c("AFMImageNetworksAnalysis","newCircleAFMImage","MAX_DISTANCE","allPossibleEdges"), envir=environment()) res<-parallel::parLapplyLB(cl, 1:ncol(allPossibleEdges),identifyLinksBetweenNodesToCreateNodes, AFMImageNetworksAnalysis, newCircleAFMImage, MAX_DISTANCE,allPossibleEdges) }else{ res<-lapply(1:ncol(allPossibleEdges),identifyLinksBetweenNodesToCreateNodes, AFMImageNetworksAnalysis, newCircleAFMImage, MAX_DISTANCE,allPossibleEdges) } end.time <- Sys.time() time.taken <- end.time - start.time vedges<-rbindlist(res) print(vedges) print(paste0("time.taken: ",time.taken)) if (nrow(vedges)>0) { # find edge for the biggest nodes avgDT6 setkeyv(vedges, c("from","to")) vedges<-unique(vedges) colnames(vedges)<-c("vid","to","arrows") vedges$vid<-as.character(vedges$vid) vedges setkeyv(vedges, "vid") setkeyv(avgDT6, "vid") vedges<-merge(vedges,avgDT6[,c("vid","circleRadius","lon","lat","cluster"),], all.x=TRUE) 
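        # two successive merges decorate each edge with node attributes: the merge
        # above (keyed on vid) attaches the "from" node columns, the renaming and
        # second merge below attach the "to" node columns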
colnames(vedges)<-c("from","vid","arrows","from_circleRadius","from_lon","from_lat","from_cluster") vedges setkeyv(vedges, "vid") vedges<-merge(vedges,avgDT6[,c("vid","circleRadius","lon","lat","cluster"),], all.x=TRUE) colnames(vedges)<-c("from","to","arrows","from_circleRadius","from_lon","from_lat","to_cluster","to_circleRadius","to_lon","to_lat","from_cluster") vedges$total<-vedges$from_circleRadius+vedges$to_circleRadius vedges$dist<-sqrt((vedges$from_lon-vedges$to_lon)^2+(vedges$from_lat-vedges$to_lat)^2) # no edge inside the same cluster vedges<-vedges[from_cluster != to_cluster,] # not several edges between the same couple of clusters fromto_cluster<-sapply(1:nrow(vedges), function(i) { from_cluster<-vedges[i,]$from_cluster to_cluster<-vedges[i,]$to_cluster ifelse(from_cluster<to_cluster,return(paste0(from_cluster," ",to_cluster)), return(paste0(to_cluster," ",from_cluster))) } ) vedges$fromto_cluster<-fromto_cluster vedges<-vedges[order(dist, decreasing = FALSE),] vedges<-unique(vedges, by="fromto_cluster") # remove already processed edge vedges<-vedges[!vedges$fromto_cluster %in% edgesProcessed,] addedNode<-FALSE if (nrow(vedges)>0) { removeDuplicatedEdge<-sapply(1:nrow(vedges), function(i) { from_cluster<-vedges[i,]$from_cluster to_cluster<-vedges[i,]$to_cluster fromto_cluster<-vedges[i,]$fromto_cluster #print(paste("removeDuplicatedEdge - ", from_cluster)) #print(paste("removeDuplicatedEdge - ", to_cluster)) split_from_cluster<-unlist(strsplit(from_cluster, split=" ", fixed=TRUE)) split_to_cluster<-unlist(strsplit(to_cluster, split=" ", fixed=TRUE)) new_split=c(split_from_cluster, split_to_cluster) #print(paste("removeDuplicatedEdge - ", new_split)) #print(paste("removeDuplicatedEdge - ", length(new_split)==length(unique(new_split)))) if (length(new_split)!=length(unique(new_split))) return(TRUE) split_fromto_cluster<-unlist(strsplit(fromto_cluster, split=" ", fixed=TRUE)) #print(paste("removeDuplicatedEdge - ", length(split_fromto_cluster))) if (length(split_fromto_cluster)>2) return(TRUE) # pos1 = grepl(from_cluster, to_cluster, fixed=TRUE) # pos2 = grepl(to_cluster, from_cluster, fixed=TRUE) # if (pos1 == FALSE | pos2 != FALSE) { # return(FALSE) # }else{ # return(TRUE) # } return(FALSE) } ) vedges$remove<-removeDuplicatedEdge #print(vedges) vedges<-vedges[!vedges$remove,] # priority to edge with low distances vedges<-vedges[order(total, -dist, decreasing = TRUE),] print(vedges) # eliminate triangles allVertices=unique(c(vedges$from, vedges$to)) allVertices if(clExist) { vedges<-simplifyNetwork(cl=cl, allVertices=allVertices, allEdges=vedges) }else{ vedges<-simplifyNetwork(allVertices=allVertices, allEdges=vedges) } vedges<-vedges[!vedges$remove,] # no edge between clusters that are connected vedges<-vedges[!dist<total,] # distance should be at least three times the current #vedges<-vedges[dist>=maxFilter*3,] # TODO simplify network / triangles # Browse[2]> vedges # from to arrows from_circleRadius from_lon from_lat to_cluster to_circleRadius to_lon to_lat from_cluster total distance # 1: 12582955 9961521 to 4 49 38 4-3 4 43 48 4-5 8 11.66190 # 2: 13631505 9961495 to 4 23 38 4-2 4 17 52 4-6 8 15.23155 # 3: 8650775 5242889 to 4 9 20 4-1 4 23 33 4-2 8 19.10497 # 4: 12582955 9961495 to 4 23 38 4-2 4 43 48 4-5 8 22.36068 # 5: 12582955 5242889 to 4 9 20 4-1 4 43 48 4-5 8 44.04543 # fromto_cluster remove # 1: 4-3 4-5 FALSE # 2: 4-2 4-6 FALSE # 3: 4-1 4-2 FALSE # 4: 4-2 4-5 FALSE # 5: 4-1 4-5 FALSE #!!lk;l;: vedges2<-copy(vedges) vedges2$old_from<-vedges2$from vedges2$old_to<- 
vedges2$to vedges2$from<-vedges2$from_cluster vedges2$to<- vedges2$to_cluster allVertices2=unique(c(vedges2$from, vedges2$to)) if(clExist) { vedges2<-simplifyNetwork(cl=cl, allVertices=allVertices2, allEdges=vedges2) }else{ vedges2<-simplifyNetwork(allVertices=allVertices2, allEdges=vedges2) } vedges2<-vedges2[!vedges2$remove,] vedges2 vedges2$from<-vedges2$old_from vedges2$to<- vedges2$old_to vedges2$old_from<-NULL vedges2$old_to<- NULL vedges<-copy(vedges2) # add nodes on the possible edge addedNode<-TRUE edgeIndex<-0 while(edgeIndex<nrow(vedges)) { print("trying to add nodes on edges") #print("add node on edges") addedNode<-TRUE # find envelopes for first edge only avgDT6 edgeIndex<-edgeIndex+1 # print(vedges[edgeIndex,]$from) # print(vedges[edgeIndex,]$to) cluster centers<-as.matrix(rbind(circlesTable[vid %in% vedges[edgeIndex,]$from,c("lon","lat"),], circlesTable[vid %in% vedges[edgeIndex,]$to,c("lon","lat"),]),rownames.force=NA) colnames(centers) <- NULL r<-unlist(unname(c(circlesTable[vid %in% vedges[edgeIndex,]$from,c("circleRadius"),], circlesTable[vid %in% vedges[edgeIndex,]$to,c("circleRadius"),]))) r tryCatch({ # library(PlotRegionHighlighter) # library(sp) # envelope <- generatePolygonEnvelope(AFMImageNetworksAnalysis, centers, r) # pointsInsideEnvelopeDT<-getAllPointsToRemove(AFMImageNetworksAnalysis, envelope) # colnames(pointsInsideEnvelopeDT)<-c("vid","lat","lon") # pointsInsideEnvelopeDT envelopeToBeRemoved <- generatePolygonEnvelope(AFMImageNetworksAnalysis, centers, r*RADIUS_MULTIPLIER) pointsInsideEnvelopeToBeRemovedDT<-getAllPointsToRemove(AFMImageNetworksAnalysis, envelopeToBeRemoved) colnames(pointsInsideEnvelopeToBeRemovedDT)<-c("vid","lat","lon") pointsInsideEnvelopeToBeRemovedDT print("Removing edges envelopes for edges") # print(centers) # print(r*RADIUS_MULTIPLIER) # add enveloppe to be removed pointsInsideEnvelopesToBeRemovedDT<-rbind(pointsInsideEnvelopesToBeRemovedDT, pointsInsideEnvelopeToBeRemovedDT) print("start spliting segment regularly...") edgesProcessed<-c(edgesProcessed, vedges[edgeIndex,]$fromto_cluster) clusterChar = data.table ( cluster = vedges[edgeIndex,]$fromto_cluster, minLon = min(pointsInsideEnvelopeToBeRemovedDT$lon), maxLon = max(pointsInsideEnvelopeToBeRemovedDT$lon), minLat = min(pointsInsideEnvelopeToBeRemovedDT$lat), maxLat = max(pointsInsideEnvelopeToBeRemovedDT$lat)) clusterChar$area<-(clusterChar$maxLat-clusterChar$minLat)*(clusterChar$maxLon-clusterChar$minLon) #clusterChar$count<-islandsDT[,.N, by=c(cluster]$N clusterChar$shape<-sapply(1:nrow(clusterChar),function(i) { if ((clusterChar[i,]$maxLon-clusterChar[i,]$minLon)>(clusterChar[i,]$maxLat-clusterChar[i,]$minLat)) { return("width") }else{ return("height") } }) print(clusterChar) #i<-1 # split regularly on the edge # only one edge no need of lapply resDT60<-lapply(1:nrow(clusterChar),function(i,pointsInsideEnvelopeDT, circlesMatrixAFMImage, clusterChar, maxFilter, centers, r) { print(i) totalHeight<-clusterChar[i,]$maxLat-clusterChar[i,]$minLat #print(totalHeight) totalWidth<-clusterChar[i,]$maxLon-clusterChar[i,]$minLon #print(totalWidth) # only on cluster... 
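                           # for an edge, height and width are recomputed from the two circle
                           # centers themselves rather than from the envelope bounding box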
totalHeight<-abs(centers[1,2]-centers[2,2]) #print(totalHeight) totalWidth<-abs(centers[1,1]-centers[2,1]) #print(totalWidth) clusterN<-clusterChar[i,]$cluster #print(clusterN) if(((clusterChar[i,]$shape == "height")&totalHeight<maxFilter)| ((clusterChar[i,]$shape == "width")&totalWidth<maxFilter)) { # taking the barycenter tempResDT2<-copy(pointsInsideEnvelopeDT) tempResDT2$cluster<-rep(1, nrow(tempResDT2)) tempResDT2[, meanLon:=mean(lon), by=cluster] tempResDT2[, meanLat:=mean(lat), by=cluster] tempResDT2[, dist:=sqrt((lon-meanLon)^2+(lat-meanLat)^2), by=cluster] tempResDT2[, minDistance:=min(dist), by=cluster] tempResDT2[dist == minDistance, c("lon","lat","cluster"),] tempResDT2<-unique(tempResDT2[dist == minDistance, c("lon","lat","cluster"),],by="cluster",fromLast=TRUE) tempResDT2 # circleRadius<-circlesMatrixAFMImage@data[circlesMatrixAFMImage@data$y %in% (circlesMatrixAFMImage@vscansize*(tempResDT2$lat-1)/circlesMatrixAFMImage@samplesperline) & # circlesMatrixAFMImage@data$x %in% (circlesMatrixAFMImage@hscansize*(tempResDT2$lon-1)/circlesMatrixAFMImage@lines)]$h # print(circleRadius) circleRadius<-rep(c(maxFilter), times=as.integer(nrow(tempResDT2))) return(data.table(cluster=clusterN, lon= tempResDT2$lon, lat= tempResDT2$lat, circleRadius= circleRadius)) }else { minLat<-min(c(centers[1,2],centers[2,2])) maxLat<-max(c(centers[1,2],centers[2,2])) minLon<-min(c(centers[1,1],centers[2,1])) maxLon<-max(c(centers[1,1],centers[2,1])) theta<-atan2((maxLon-minLon),(maxLat-minLat)) hypothenuse<-maxFilter # regularly spaced points depending on circleRadius if (clusterChar[i,]$shape == "height") { # number if (cos(theta)!=0) { regularSpace<-hypothenuse/cos(theta) }else{ regularSpace<-hypothenuse/sin(theta) } numberOfIntermediaryPoints<-floor(totalHeight/regularSpace-1) if (numberOfIntermediaryPoints<=0) { numberOfIntermediaryPoints<-0 vectorOfLat<-c(clusterChar[i,]$minLat, clusterChar[i,]$maxLat) vectorOfLat }else{ #regularSpace<-floor(totalHeight/(numberOfIntermediaryPoints+1)) vectorOfLat<-seq(from = (minLat+regularSpace), to = (minLat+totalHeight-regularSpace) , by=regularSpace) vectorOfLat<-round(vectorOfLat,0) vectorOfLat } # use the medium horizontal position for each vectorOfLat resDT5<-lapply(1:length(vectorOfLat),function(j, pointsInsideEnvelopeDT, vectorOfLat, clusterN) { resDT2<-pointsInsideEnvelopeDT[lat %in% vectorOfLat[j],] #print(resDT2) avgDTLon<-floor(mean(resDT2$lon)) #print(paste("cluster ", clusterN, "keep ",avgDTLon, vectorOfLat[j] )) circleRadius<-circlesMatrixAFMImage@data[circlesMatrixAFMImage@data$y %in% (circlesMatrixAFMImage@vscansize*(vectorOfLat[j]-1)/circlesMatrixAFMImage@samplesperline) & circlesMatrixAFMImage@data$x %in% (circlesMatrixAFMImage@hscansize*(avgDTLon-1)/circlesMatrixAFMImage@lines)]$h circleRadius return(data.table(cluster=clusterN, lon = avgDTLon, lat= vectorOfLat[j], circleRadius= circleRadius)) },pointsInsideEnvelopeDT, vectorOfLat, clusterChar[i,]$cluster) return(rbindlist(resDT5)) }else{ if (cos(theta)!=0) { regularSpace<-hypothenuse/sin(theta) }else{ regularSpace<-hypothenuse/cos(theta) } numberOfIntermediaryPoints<-floor(totalWidth/regularSpace-1) if (numberOfIntermediaryPoints<=0) { numberOfIntermediaryPoints<-0 vectorOfLon<-c(minLon, maxLon) vectorOfLon }else{ #regularSpace<-floor(totalWidth/(numberOfIntermediaryPoints+1)) vectorOfLon<-seq(from = (minLon+regularSpace), to = (maxLon-regularSpace) , by=regularSpace) #vectorOfLon<-c(minLon,vectorOfLon, maxLon) vectorOfLon<-round(vectorOfLon,0) vectorOfLon } #j=1 
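                             # same scheme as the "height" branch: for each sampled longitude, the
                             # node is placed at the mean latitude of the envelope points in that
                             # column and its radius is read from the max-circle matrix image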
resDT5<-lapply(1:length(vectorOfLon),function(j, pointsInsideEnvelopeDT, vectorOfLon, clusterN) { resDT2<-pointsInsideEnvelopeDT[lon %in% vectorOfLon[j],] #print(resDT2) avgDTLat<-floor(mean(resDT2$lat)) #print(paste("cluster ", clusterN, "keep ",avgDTLon, vectorOfLon[j] )) circleRadius<-circlesMatrixAFMImage@data[circlesMatrixAFMImage@data$y %in% (circlesMatrixAFMImage@vscansize*(avgDTLat-1)/circlesMatrixAFMImage@samplesperline) & circlesMatrixAFMImage@data$x %in% (circlesMatrixAFMImage@hscansize*(vectorOfLon[j]-1)/circlesMatrixAFMImage@lines)]$h circleRadius return(data.table(cluster=clusterN, lon = vectorOfLon[j], lat= avgDTLat, circleRadius= circleRadius)) },pointsInsideEnvelopeDT, vectorOfLon, clusterChar[i,]$cluster) return(rbindlist(resDT5)) } } }, pointsInsideEnvelopeToBeRemovedDT, circlesMatrixAFMImage, clusterChar,maxFilter=max(r), centers, r) resDT60<-rbindlist(resDT60) resDT60<-unique(resDT60) resDT60<-resDT60[complete.cases(resDT60),] resDT60<-resDT60[lon != 0 & lon != (AFMImageNetworksAnalysis@binaryAFMImage@samplesperline-1) & lat!=0 & lat!=(AFMImageNetworksAnalysis@binaryAFMImage@lines-1) ,] #resDT60<-resDT60[circleRadius!=0] print("ok") resDT60$keep<-rep(TRUE,nrow(resDT60)) resDT60$vid<-as.character(getVertexId(AFMImageNetworksAnalysis@binaryAFMImage,resDT60$lon, resDT60$lat)) print("nodes added on one edge") print(resDT60) resDT60$circleRadius<-rep(max(r),nrow(resDT60)) print(resDT60) # add nodes regularly in enveloppe avgDT6<-rbind(avgDT6, resDT60) avgDT6 # merge cluster for one vid # the cluster name is sorted for (avid in avgDT6[duplicated(avgDT6,by=c("vid"))]$vid){ allCluster<-unlist(strsplit(avgDT6[vid %in% avid,]$cluster," ")) mergeCluster<-unique(allCluster) finalCluster=paste(mergeCluster[order(mergeCluster)],collapse=" ") print(finalCluster) avgDT6[vid %in% avid,]$cluster<-finalCluster } for (avid in avgDT6[duplicated(avgDT6,by=c("vid"))]$vid){ pointDT<-avgDT6[vid %in% avid,] # circleRadius<-circlesMatrixAFMImage@data[circlesMatrixAFMImage@data$y %in% (circlesMatrixAFMImage@vscansize*pointDT$lat/circlesMatrixAFMImage@samplesperline) & # circlesMatrixAFMImage@data$x %in% (circlesMatrixAFMImage@hscansize*pointDT$lon/circlesMatrixAFMImage@lines)]$h circleRadius<-max(avgDT6[vid %in% avid,]$circleRadius) avgDT6[vid %in% avid,]$circleRadius<-circleRadius } avgDT6<-unique(avgDT6) #print(avgDT6) # Manage addition and removal of nodes avgDT7<-copy(resDT60) newCircleAFMImage2<-addNode(newCircleAFMImage2, avgDT7,filterIndex) avgDT7$circleRadius<-avgDT7$circleRadius*RADIUS_MULTIPLIER newCircleAFMImage<-removeNode(newCircleAFMImage, avgDT7) #, RADIUS_MULTIPLIER, BIGGER_CIRCLE_RADIUS, BIGGER_CIRCLE_RADIUS_MULTILPLIER) maxFilter # remove all the envelopes # displayIn3D(newCircleAFMImage, noLight=TRUE) # displayIn3D(newCircleAFMImage2, noLight=TRUE) avgDT<-rbind(avgDT, resDT60) avgDT #newCircleAFMImage@data$h[(pointsInsideEnvelopeDT$coords.x2-1)*newCircleAFMImage@samplesperline+(pointsInsideEnvelopeDT$coords.x1)]<-0 # circlesMatrixAFMImage@data$h[((islandsDT[,1])-1)*circlesMatrixAFMImage@samplesperline + islandsDT[,2]] }, error = function(e) { print("extra nodes error 1") print(e) warning(paste("extra nodes error 1",e)) }, finally = { #print("extra nodes identified") }) } } if (!addedNode) print("no more edge to analyse") } # remove enveloppes from nodes print(pointsInsideEnvelopesToBeRemovedDT) #TBD good #newCircleAFMImage@data$h[pointsInsideEnvelopesToBeRemovedDT$coords.x2+1+(pointsInsideEnvelopesToBeRemovedDT$coords.x1)*newCircleAFMImage@samplesperline]<-0 
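  # heights are stored row by row, so pixel (lon, lat) maps to index
  # lon+1+lat*samplesperline in the h vector; e.g. with samplesperline=512,
  # the point (lon=10, lat=2) is erased at index 10+1+2*512 = 1035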
newCircleAFMImage@data$h[pointsInsideEnvelopesToBeRemovedDT$lon+1+(pointsInsideEnvelopesToBeRemovedDT$lat)*newCircleAFMImage@samplesperline]<-0 }else{ if (is.list(circlesTable) & nrow(circlesTable) == 1) { # Manage addition and removal of nodes avgDT7<-copy(avgDT6) avgDT7$circleRadius<-avgDT7$circleRadius*RADIUS_MULTIPLIER newCircleAFMImage<-removeNode(newCircleAFMImage, avgDT7) #, RADIUS_MULTIPLIER, BIGGER_CIRCLE_RADIUS, BIGGER_CIRCLE_RADIUS_MULTILPLIER) newCircleAFMImage2<-addNode(newCircleAFMImage2, avgDT6,filterIndex) # remove enveloppes from nodes print(pointsInsideEnvelopesToBeRemovedDT) #TBD good #newCircleAFMImage@data$h[pointsInsideEnvelopesToBeRemovedDT$coords.x2+1+(pointsInsideEnvelopesToBeRemovedDT$coords.x1)*newCircleAFMImage@samplesperline]<-0 newCircleAFMImage@data$h[pointsInsideEnvelopesToBeRemovedDT$lon+1+(pointsInsideEnvelopesToBeRemovedDT$lat)*newCircleAFMImage@samplesperline]<-0 } } #displayIn3D(newCircleAFMImage, noLight=FALSE) #displayIn3D(newCircleAFMImage2, noLight=FALSE) } } } } #} print(filterIndex) avgDT<-avgDT[-1] #displayIn3D(AFMImageNetworksAnalysis@binaryAFMImage, noLight=FALSE) #displayIn3D(newCircleAFMImage, noLight=FALSE) #displayIn3D(newCircleAFMImage2, noLight=FALSE) AFMImageNetworksAnalysis@smallBranchesTreatment<-FALSE # small branches treatment if (AFMImageNetworksAnalysis@smallBranchesTreatment) { # newCircleAFMImage<-copy(AFMImageNetworksAnalysis@binaryAFMImage) # displayIn3D(newCircleAFMImage, noLight=FALSE) # remove the edge of the image edgeWidth<-listOfFilters[1] removedEdgeData<-copy(newCircleAFMImage@data) removedEdgeData[removedEdgeData$x<edgeWidth*newCircleAFMImage@hscansize/newCircleAFMImage@samplesperline]<-0 removedEdgeData[removedEdgeData$y<edgeWidth*newCircleAFMImage@vscansize/newCircleAFMImage@lines]<-0 removedEdgeData[removedEdgeData$x>(1-edgeWidth/newCircleAFMImage@samplesperline)*newCircleAFMImage@hscansize]<-0 removedEdgeData[removedEdgeData$y>(1-edgeWidth/newCircleAFMImage@lines)*newCircleAFMImage@vscansize]<-0 newCircleAFMImage@data<-copy(removedEdgeData) #displayIn3D(newCircleAFMImage, noLight=FALSE) # finding the extra small nodes untreatedPoints<-newCircleAFMImage@data[h!=0] islandsDT<-cbind(lon=1+untreatedPoints$y*newCircleAFMImage@lines/newCircleAFMImage@vscansize, lat=1+untreatedPoints$x*newCircleAFMImage@samplesperline/newCircleAFMImage@hscansize) if (nrow(islandsDT)>0) { DBSCAN <- dbscan(islandsDT, eps = 1.5, MinPts = 3, borderPoints=FALSE) #plot(untreatedPoints$y, untreatedPoints$x, col = DBSCAN$cluster, pch = 20) #plot(islandsDT, col = DBSCAN$cluster, pch = 20) islandsDT<-data.table(islandsDT,cluster=DBSCAN$cluster) setkeyv(islandsDT, "cluster") isolatedIslandsDT<-islandsDT[cluster==0,] isolatedIslandsDT islandsDT<-islandsDT[cluster!=0,] islandsDT if (nrow(islandsDT)>0) { #clusterN<-1 identifyLinksBetweenClustersAndExistingNodes<-function(clusterN, AFMImageNetworksAnalysis, MAX_DISTANCE, avgDT, islandsDT) { requireNamespace("data.table") requireNamespace("sp") requireNamespace("AFM") clusterLon<-clusterLat<-cluster<-IDX<-keepThinPoints<-meandist<-NULL #print(clusterN) resDT<-data.table(cluster=c(0), clusterLon=c(0), clusterLat=c(0), existingNodeLon=c(0), existingNodeLat=c(0)) centers1<-islandsDT[islandsDT$cluster %in% clusterN,] # define the points in the circle otherNodes<-copy(avgDT) for (center_index in seq(1,nrow(centers1))) { center1<-centers1[center_index,] circleRadius1<-1 #otherNodes<-allNodesAsSpatialPoints[!(allNodesAsSpatialPoints$coords.x1==center1$lon&allNodesAsSpatialPoints$coords.x2==center1$lat)] 
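              # clamp the MAX_DISTANCE search window around center1 to the image
              # boundaries before computing the distances to the existing nodes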
minLat<- ifelse((center1$lat-MAX_DISTANCE)>0, center1$lat-MAX_DISTANCE, 0) maxLat<- ifelse((center1$lat+MAX_DISTANCE)<AFMImageNetworksAnalysis@binaryAFMImage@lines, center1$lat+MAX_DISTANCE, AFMImageNetworksAnalysis@binaryAFMImage@lines-1) minLon<- ifelse((center1$lon-MAX_DISTANCE)>0, center1$lon-MAX_DISTANCE, 0) maxLon<- ifelse((center1$lon+MAX_DISTANCE)<AFMImageNetworksAnalysis@binaryAFMImage@samplesperline, center1$lon+MAX_DISTANCE, AFMImageNetworksAnalysis@binaryAFMImage@samplesperline-1) otherNodes2<-copy(avgDT[lon>=minLon&lon<=maxLon&lat>=minLat&lat<=maxLat,]) otherNodes2$dist<-sp::spDistsN1(pts=matrix(c(otherNodes2$lon, otherNodes2$lat), ncol=2), pt=c(center1$lon, center1$lat), longlat=FALSE) #otherNodes otherNodes2<-otherNodes2[with(otherNodes2, order(otherNodes2$dist)), ] otherNodes2<-otherNodes2[otherNodes2$dist<MAX_DISTANCE,] if (nrow(otherNodes2)>0) { for (centerId2Nb in seq(1, nrow(otherNodes2))) { pt<-otherNodes2[centerId2Nb,] # print(center1) # print(circleRadius1) # print(pt) if (AreNodesConnected(AFMImageNetworksAnalysis@binaryAFMImage, center1, circleRadius1, data.table(lon=pt$lon, lat=pt$lat), pt$circleRadius)) { #print("yes") resDT=rbind(resDT, data.table(cluster=clusterN, clusterLon=center1$lon, clusterLat=center1$lat, existingNodeLon=pt$lon, existingNodeLat=pt$lat)) } } } } resDT<-resDT[-1,] return(resDT) } print(paste("number of nodes=", nrow(avgDT))) print(paste("number of clusters=", length(unique(islandsDT$cluster)))) #MB TODO # more in the width or in the height ? print("start spliting segment regularly for small branches...") clusterChar = data.table ( cluster = unique(islandsDT$cluster), minLon = islandsDT[, min(lon), by=cluster]$V1, maxLon = islandsDT[, max(lon), by=cluster]$V1, minLat = islandsDT[, min(lat), by=cluster]$V1, maxLat = islandsDT[, max(lat), by=cluster]$V1) clusterChar$area<-(clusterChar$maxLat-clusterChar$minLat)*(clusterChar$maxLon-clusterChar$minLon) clusterChar$count<-islandsDT[,.N, by=cluster]$N clusterChar$shape<-sapply(1:nrow(clusterChar),function(i) { if ((clusterChar[i,]$maxLon-clusterChar[i,]$minLon)>(clusterChar[i,]$maxLat-clusterChar[i,]$minLat)) { return("width") }else{ return("height") } }) print(clusterChar) # TODO print("calculate extra node from edge") rm(resDT6) i=3 #i=6 resDT6<-lapply(1:nrow(clusterChar),function(i,islandsDT, clusterChar, maxFilter,AREA_MIN,CLUSTER_COUNT_MIN) { meanLon<-meanLat<-NULL print(i) totalHeight<-clusterChar[i,]$maxLat-clusterChar[i,]$minLat #print(totalHeight) totalWidth<-clusterChar[i,]$maxLon-clusterChar[i,]$minLon #print(totalWidth) #if ((clusterChar[i,]$area <= AREA_MIN)|(clusterChar[i,]$count <= CLUSTER_COUNT_MIN)) { if(((clusterChar[i,]$shape == "height")&totalHeight<maxFilter)| ((clusterChar[i,]$shape == "width")&totalWidth<maxFilter)) { # # if sample not extremely small # if (!clusterChar[i,]$count <= 3) { # taking the barycenter is useless because of low number of points # sample only one point if the cluster is with small area clusterN<-clusterChar[i,]$cluster resDT2<-islandsDT[cluster %in% clusterN,] #print(resDT2) #sampleC<-sample(1:nrow(resDT2),1) tempBarycenterDT<-copy(resDT2) tempBarycenterDT[, meanLon:=mean(lon), by=cluster] tempBarycenterDT[, meanLat:=mean(lat), by=cluster] tempBarycenterDT[, dist:=sqrt((lon-meanLon)^2+(lat-meanLat)^2), by=cluster] tempBarycenterDT[, minDistance:=min(dist), by=cluster] tempBarycenterDT[dist == minDistance, c("lon","lat","cluster"),] tempBarycenterDT<-unique(tempBarycenterDT[dist == minDistance, c("lon","lat","cluster"),],by="cluster",fromLast=TRUE) 
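          # the retained point is an actual cluster pixel, the one closest to the
          # cluster centroid, so the resulting node is guaranteed to sit on the branch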
#tempBarycenterDT<-unique(islandsDT,by="cluster",fromLast=TRUE) tempBarycenterDT return(data.table(cluster=clusterN, lon = tempBarycenterDT$lon, lat= tempBarycenterDT$lat)) #return(data.table(cluster=clusterN, lon = resDT2[sampleC,]$lon, lat= resDT2[sampleC,]$lat)) # } }else { # regularly spaced points depending on circleRadius if (clusterChar[i,]$shape == "height") { # number #sSample = floor(SAMPLE_ON_THIN_PORTIONS*totalHeight/100) numberOfIntermediaryPoints<-floor(totalHeight/maxFilter-2) if (numberOfIntermediaryPoints<=0) { numberOfIntermediaryPoints<-0 vectorOfLat<-c(clusterChar[i,]$minLat, clusterChar[i,]$maxLat) vectorOfLat }else{ regularSpace<-floor(totalHeight/(numberOfIntermediaryPoints+1)) vectorOfLat<-seq(from = (clusterChar[i,]$minLat+regularSpace), to = (clusterChar[i,]$minLat+totalHeight-regularSpace) , by=regularSpace) vectorOfLat<-c(clusterChar[i,]$minLat,vectorOfLat, clusterChar[i,]$maxLat) vectorOfLat } # use the medium horizontal position for each vectorOfLat resDT5<-lapply(1:length(vectorOfLat),function(j, islandsDT, vectorOfLat, clusterN) { resDT2<-islandsDT[lat %in% vectorOfLat[j] & cluster %in% clusterN,] #print(resDT2) avgDTLon<-floor(mean(resDT2$lon)) #print(paste("cluster ", clusterN, "keep ",avgDTLon, vectorOfLat[j] )) return(data.table(cluster=clusterN, lon = avgDTLon, lat= vectorOfLat[j])) },islandsDT, vectorOfLat, clusterChar[i,]$cluster) #print(resDT5) #print(rbindlist(resDT5)) return(rbindlist(resDT5)) }else{ numberOfIntermediaryPoints<-floor(totalWidth/maxFilter-2) if (numberOfIntermediaryPoints<=0) { numberOfIntermediaryPoints<-0 vectorOfLon<-c(clusterChar[i,]$minLon, clusterChar[i,]$maxLon) vectorOfLon }else{ regularSpace<-floor(totalWidth/(numberOfIntermediaryPoints+1)) vectorOfLon<-seq(from = (clusterChar[i,]$minLon+regularSpace), to = (clusterChar[i,]$minLon+totalWidth-regularSpace) , by=regularSpace) vectorOfLon<-c(clusterChar[i,]$minLon,vectorOfLon, clusterChar[i,]$maxLon) vectorOfLon } #j=1 resDT5<-lapply(1:length(vectorOfLon),function(j, islandsDT, vectorOfLon, clusterN) { resDT2<-islandsDT[lon %in% vectorOfLon[j] & cluster %in% clusterN,] #print(resDT2) avgDTLat<-floor(mean(resDT2$lat)) #print(paste("cluster ", clusterN, "keep ",avgDTLon, vectorOfLon[j] )) return(data.table(cluster=clusterN, lon = vectorOfLon[j], lat= avgDTLat)) },islandsDT, vectorOfLon, clusterChar[i,]$cluster) #print(resDT5) #print(rbindlist(resDT5)) return(rbindlist(resDT5)) } } }, islandsDT, clusterChar,maxFilter=1,AREA_MIN,CLUSTER_COUNT_MIN) resDT6<-rbindlist(resDT6) resDT6<-unique(resDT6) resDT6<-resDT6[complete.cases(resDT6),] resDT6<-resDT6[lon != 0 & lon != (AFMImageNetworksAnalysis@binaryAFMImage@samplesperline-1) & lat!=0 & lat!=(AFMImageNetworksAnalysis@binaryAFMImage@lines-1) ,] resDT6 print("ok") resDT6$keep<-rep(TRUE, nrow(resDT6)) resDT6$circleRadius=rep(0, nrow(resDT6)) resDT6$vid<-as.character(getVertexId(AFMImageNetworksAnalysis@binaryAFMImage,resDT6$lon, resDT6$lat)) avgDT<-rbind(avgDT, data.table(lon = resDT6$lon, lat = resDT6$lat, circleRadius=resDT6$circleRadius, keep=resDT6$keep, vid=resDT6$vid)) avgDT } } } print(avgDT) for (avid in avgDT[duplicated(avgDT,by=c("vid"))]$vid){ allCluster<-unlist(strsplit(avgDT[vid %in% avid,]$cluster," ")) mergeCluster<-unique(allCluster) finalCluster=paste(mergeCluster[order(mergeCluster)],collapse=" ") print(finalCluster) avgDT[vid %in% avid,]$cluster<-finalCluster } for (avid in avgDT[duplicated(avgDT,by=c("vid"))]$vid){ pointDT<-avgDT[vid %in% avid,] circleRadius<-circlesMatrixAFMImage@data[circlesMatrixAFMImage@data$y 
%in% (circlesMatrixAFMImage@vscansize*pointDT$lat/circlesMatrixAFMImage@samplesperline) &
                                                  circlesMatrixAFMImage@data$x %in% (circlesMatrixAFMImage@hscansize*pointDT$lon/circlesMatrixAFMImage@lines)]$h
    avgDT[vid %in% avid,]$circleRadius<-circleRadius
  }
  avgDT<-unique(avgDT)
  print(avgDT)

  AFMImageNetworksAnalysis@binaryAFMImage<-copy(binaryAFMImage)
  AFMImageNetworksAnalysis@binaryAFMImageWithCircles<-copy(newCircleAFMImage2)
  AFMImageNetworksAnalysis@circlesTable<-copy(unique(avgDT))
  return(AFMImageNetworksAnalysis)
}

#' identify the edges of the network from the nodes (circles)
#'
#' @param ... cl: a cluster object from the parallel package
#' @param AFMImageNetworksAnalysis a \code{\link{AFMImageNetworksAnalysis}}
#' @param MAX_DISTANCE the maximum distance between nodes to check if nodes are connected. Default value is 40.
#' @export
#' @author M.Beauvais
identifyEdgesFromCircles<-function(...,AFMImageNetworksAnalysis, MAX_DISTANCE=40) {
  print("starting identifyEdgesFromCircles")
  force(AFMImageNetworksAnalysis)
  keep<-fromto<-NULL

  args<-names(list(...))
  print(args)
  if (is.null(args)||is.na(match('cl',args))) {
    clExist<-FALSE
  }else{
    clExist<-TRUE
    cl<-list(...)[['cl']]
  }
  if (clExist) {
    print("using parallel")
    requireNamespace("parallel")
  }

  from<-to<-NULL
  alledges<-c()
  allvertices<-c()

  # for all the nodes of the future network,
  # for all the points in the circles in the plot,
  # identify if a link is available to each of the other nodes
  identifyLinksBetweenNodes<-function(k, AFMImageNetworksAnalysis, MAX_DISTANCE, allPossibleEdges) {
    requireNamespace("data.table")
    requireNamespace("sp")
    requireNamespace("parallel")
    requireNamespace("AFM")
    #print(paste("k",k))
    keep<-NULL
    currentEdge<-allPossibleEdges[,k]
    centerId<-currentEdge[1]
    otherCenterId<-currentEdge[2]

    circlesTable<-AFMImageNetworksAnalysis@circlesTable[keep %in% c(TRUE),]
    vedges<-data.table(from = c(""), to = c(""),arrows = c("to"))

    center1= circlesTable[centerId,]
    circleRadius1=circlesTable[centerId,]$circleRadius
    vid1<-getVertexId(AFMImageNetworksAnalysis@binaryAFMImage,center1$lon, center1$lat)

    otherNodes<-circlesTable[otherCenterId,]
    otherNodes$dist<-sp::spDistsN1(pts=matrix(c(otherNodes$lon, otherNodes$lat), ncol=2), pt=c(center1$lon, center1$lat), longlat=FALSE)
    otherNodes<-otherNodes[with(otherNodes, order(otherNodes$dist)), ]
    otherNodes<-otherNodes[otherNodes$dist<MAX_DISTANCE,]

    if (nrow(otherNodes)!=0){
      #print(otherNodes)
      pt<-otherNodes[1,]
      vid2<-getVertexId(AFMImageNetworksAnalysis@binaryAFMImage,pt$lon, pt$lat)
      if (AreNodesConnected(AFMImageNetworksAnalysis@binaryAFMImage, center1, circleRadius1, data.table(lon=pt$lon, lat=pt$lat), pt$circleRadius)) {
        #print(paste("segment exists",center1$lon, center1$lat,":",pt$coords.x1, pt$coords.x2))
        vedges<-rbind(vedges, data.table(from = vid1, to = vid2,arrows = c("to")))
        #displaygridIgraphPlotFromEdges(AFMImageNetworksAnalysis@binaryAFMImage, edges=vedges[-1,], isolates = c())
      }
    }else{
      #print("node too far")
    }
    return(vedges[-1,])
  }

  circlesTable<-data.table(copy(AFMImageNetworksAnalysis@circlesTable[keep %in% c(TRUE),]))
  circlesTable$vid<-as.character(getVertexId(AFMImageNetworksAnalysis@binaryAFMImage,circlesTable$lon, circlesTable$lat))
  circlesTable

  print(paste("number of circles=", nrow(circlesTable)))
  if (is.list(circlesTable) & nrow(circlesTable) > 1) {
    allPossibleEdges<-combn(1:nrow(circlesTable), 2, simplify = TRUE)
    print(paste("number of possible edges=", ncol(allPossibleEdges)))

    start.time <- Sys.time()
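    # the candidate edges are tested in parallel when a cluster object was
    # supplied: the required packages and variables are pushed to the workers first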
print(start.time) if(clExist) { print("using parallel for identifyLinksBetweenNodes") #cl<-cl parallel::clusterEvalQ(cl , c(library("data.table"),library("sp"), library("AFM"),library("parallel"))) parallel::clusterExport(cl, c("AFMImageNetworksAnalysis","MAX_DISTANCE","allPossibleEdges"), envir=environment()) res<-parallel::parLapply(cl, 1:ncol(allPossibleEdges),identifyLinksBetweenNodes, AFMImageNetworksAnalysis, MAX_DISTANCE,allPossibleEdges) }else{ res<-lapply(1:ncol(allPossibleEdges),identifyLinksBetweenNodes, AFMImageNetworksAnalysis, MAX_DISTANCE,allPossibleEdges) } end.time <- Sys.time() time.taken <- end.time - start.time vedges<-rbindlist(res) print(vedges) print(paste0("time.taken: ",time.taken)) # new treatment allEdges<-copy(vedges) allEdges allEdges$from.coords.x1<-sapply(1:nrow(allEdges),function(i) getCoordinatesFromVertexId( allEdges[i,c("from"),with=FALSE])$coords.x1) allEdges$from.coords.x2<-sapply(1:nrow(allEdges),function(i) getCoordinatesFromVertexId(allEdges[i,c("from"),with=FALSE])$coords.x2) allEdges$to.coords.x1<-sapply(1:nrow(allEdges),function(i) getCoordinatesFromVertexId(allEdges[i,c("to"),with=FALSE])$coords.x1) allEdges$to.coords.x2<-sapply(1:nrow(allEdges),function(i) getCoordinatesFromVertexId(allEdges[i,c("to"),with=FALSE])$coords.x2) allEdges$dist<-sapply(1:nrow(allEdges),function(i) sp::spDistsN1(pts=as.matrix(cbind(allEdges[i,]$from.coords.x1,allEdges[i,]$from.coords.x2)), pt=as.matrix(cbind(allEdges[i,]$to.coords.x1,allEdges[i,]$to.coords.x2)),longlat=FALSE)) allEdges$keep<-rep(TRUE, nrow(allEdges)) newcirclesTable<-copy(circlesTable[,c("vid","circleRadius"),]) setkey(allEdges, from) colnames(newcirclesTable)<-c("from","from_circleRadius") setkey(newcirclesTable,from) allEdges<-merge(allEdges, newcirclesTable,all.x = TRUE) allEdges setkey(allEdges, to) colnames(newcirclesTable)<-c("to","to_circleRadius") setkey(newcirclesTable,to) allEdges<-merge(allEdges, newcirclesTable,all.x = TRUE) #print(allEdges) allVertices<-unique(c(allEdges$from, allEdges$to)) #print(allVertices) # for each edge (u, v): # for each vertex w: # if (v, w) is an edge and (w, u) is an edge return true # else return false setkey(allEdges, from, to) totalNumberOfEdges<-nrow(allEdges) print(paste0("totalNumberOfEdges=",totalNumberOfEdges)) print("simplify network") if(clExist) { allEdges<-simplifyNetwork(cl=cl, allVertices=allVertices, allEdges=allEdges) }else{ allEdges<-simplifyNetwork(allVertices=allVertices, allEdges=allEdges) } # allEdges # allEdges[keep %in% c(FALSE),] print("simplify ended") # displaygridIgraphPlotFromEdges(AFMImageNetworksAnalysis@binaryAFMImage, # allEdges, # AFMImageNetworksAnalysis@isolatedNodesList) # # displaygridIgraphPlotFromEdges(AFMImageNetworksAnalysis@binaryAFMImage, # allEdges[keep %in% c(TRUE),], # AFMImageNetworksAnalysis@isolatedNodesList) mn <- pmin(allEdges$to, allEdges$from) mx <- pmax(allEdges$to, allEdges$from) int <- as.numeric(interaction(mn, mx)) allEdges<-allEdges[match(unique(int), int),] AFMImageNetworksAnalysis@edgesTable<-copy(allEdges) AFMImageNetworksAnalysis@edgesTable<-AFMImageNetworksAnalysis@edgesTable[dist !=0,] # displaygridIgraphPlotFromEdges(AFMImageNetworksAnalysis@binaryAFMImage, # AFMImageNetworksAnalysis@edgesTable[keep %in% c(TRUE),], # AFMImageNetworksAnalysis@isolatedNodesList) # # displaygridIgraphPlotFromEdges(AFMImageNetworksAnalysis@binaryAFMImage, # AFMImageNetworksAnalysis@edgesTable, # AFMImageNetworksAnalysis@isolatedNodesList) }else{ warning("no treatment because no circle") } 
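  # at this point, when circles were available, the edgesTable slot holds the
  # deduplicated and simplified edge list; otherwise it is left untouched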
  return(AFMImageNetworksAnalysis)
}

#' fuse the nodes that are intersecting
#'
#' manage the fusion of nodes whose circles intersect;
#' all the circles are kept and a correspondence table
#' (node id / fusion id) is maintained
#'
#' @param AFMImageNetworksAnalysis the AFMImageNetworksAnalysis instance
#' @return the updated instance, with the fused-nodes correspondence and edges tables
#' @export
#' @author M.Beauvais
fusionCloseNodes<-function(AFMImageNetworksAnalysis) {
  group<-mean_lon<-lon<-mean_lat<-lat<-vertexId<-from<-to<-vedges<-NULL

  AFMImageNetworksAnalysis@circlesTable
  AFMImageNetworksAnalysis@circlesTable$group<-rep(0, nrow(AFMImageNetworksAnalysis@circlesTable))
  groupNumber<-0

  for (centerId in seq(1, nrow(AFMImageNetworksAnalysis@circlesTable))) {
    #centerId=6
    print(paste0(centerId," / ", nrow(AFMImageNetworksAnalysis@circlesTable)))
    center<- AFMImageNetworksAnalysis@circlesTable[centerId,]
    radiusVector<-center$circleRadius+AFMImageNetworksAnalysis@circlesTable$circleRadius
    distVector<-sp::spDistsN1(pts=matrix(c(AFMImageNetworksAnalysis@circlesTable$lon,AFMImageNetworksAnalysis@circlesTable$lat),ncol=2), pt=matrix(c(center$lon,center$lat),ncol=2), longlat=FALSE)
    intersectVector<-distVector-radiusVector-2
    # print(radiusVector)
    # print(distVector)
    print(intersectVector)
    listOfIntersect<-which(intersectVector<0)
    #print(listOfIntersect)
    if (length(listOfIntersect)>1) {
      print("to be grouped")
      if (all(AFMImageNetworksAnalysis@circlesTable[listOfIntersect,]$group==0)) {
        groupNumber<-groupNumber+1
        AFMImageNetworksAnalysis@circlesTable[listOfIntersect,]$group<-groupNumber
      }else{
        print("special")
        #print(AFMImageNetworksAnalysis@circlesTable[listOfIntersect&group!=0])
        existingGroupNumber<-AFMImageNetworksAnalysis@circlesTable[listOfIntersect,][group!=0,][1]$group
        AFMImageNetworksAnalysis@circlesTable[listOfIntersect,]$group<-existingGroupNumber
      }
      #print(AFMImageNetworksAnalysis@circlesTable[listOfIntersect])
    }
  }
  AFMImageNetworksAnalysis@circlesTable

  nbOfNodesToFusion<-length(unique(AFMImageNetworksAnalysis@circlesTable[group!=0,]$group))
  nbOfNodesToFusion

  if (nbOfNodesToFusion>0) {
    # define new coordinates for all points
    # wh<-which(AFMImageNetworksAnalysis@circlesTable$group==0)
    # AFMImageNetworksAnalysis@circlesTable[wh]$new_lat<-AFMImageNetworksAnalysis@circlesTable[wh]$lat
    # AFMImageNetworksAnalysis@circlesTable[wh]$new_lon<-AFMImageNetworksAnalysis@circlesTable[wh]$lon
    # AFMImageNetworksAnalysis@circlesTable
    AFMImageNetworksAnalysis@circlesTable[, mean_lon:=floor(mean(lon)), by=group]
    AFMImageNetworksAnalysis@circlesTable[, mean_lat:=floor(mean(lat)), by=group]
    AFMImageNetworksAnalysis@circlesTable[group==0, mean_lon:=lon]
    AFMImageNetworksAnalysis@circlesTable[group==0, mean_lat:=lat]
    AFMImageNetworksAnalysis@circlesTable

    # define the edge correspondence table
    newvedges<-data.table(vertexId=getVertexId(AFMImageNetworksAnalysis@binaryAFMImage,
                                               AFMImageNetworksAnalysis@circlesTable[group!=0,]$lon,
                                               AFMImageNetworksAnalysis@circlesTable[group!=0,]$lat),
                          new_vertexId=getVertexId(AFMImageNetworksAnalysis@binaryAFMImage,
                                                   AFMImageNetworksAnalysis@circlesTable[group!=0,]$mean_lon,
                                                   AFMImageNetworksAnalysis@circlesTable[group!=0,]$mean_lat))
    newvedges
    setkey(newvedges, vertexId)

    # transform the isolated nodes
    isolates<-AFMImageNetworksAnalysis@isolatedNodesList
    isolates %in% newvedges$vertexId
    newvedges
    onewh<-which(isolates %in% newvedges$vertexId)
    for(index in onewh) {
      print(index)
      oldvertexId<-isolates[index]
      print(oldvertexId)
      newVertexId<-newvedges[vertexId %in% oldvertexId]$new_vertexId
      print(newVertexId)
      isolates<-replace(isolates,
                        isolates==oldvertexId,
                        as.character(newVertexId))
    }
    isolates<-unique(isolates)
    isolates

    # transform the edges using the fused-node correspondence
    newvedges2<-copy(AFMImageNetworksAnalysis@edgesTable)
    newvedges2
    onewh<-which(newvedges2$from %in% newvedges$vertexId)
    for(index in onewh) {
      print(index)
      oldvertexId<-newvedges2[index,]$from
      print(oldvertexId)
      newVertexId<-newvedges[vertexId %in% oldvertexId,]$new_vertexId
      print(newVertexId)
      newvedges2[index, from:=as.character(newVertexId)]
    }
    newvedges2
    onewh<-which(newvedges2$to %in% newvedges$vertexId)
    for(index in onewh) {
      print(index)
      oldvertexId<-newvedges2[index,]$to
      print(oldvertexId)
      newVertexId<-newvedges[vertexId %in% oldvertexId,]$new_vertexId
      print(newVertexId)
      newvedges2[index, to:=as.character(newVertexId)]
    }
    newvedges2
  }else{
    newvedges2<-vedges
  }
  #print(newvedges2)

  AFMImageNetworksAnalysis@fusionedNodesCorrespondance<-copy(newvedges)
  if (inherits(newvedges2, "data.table")) {
    AFMImageNetworksAnalysis@fusionedNodesEdgesTable<-copy(newvedges2)
  }else{
    AFMImageNetworksAnalysis@fusionedNodesEdgesTable<-copy(AFMImageNetworksAnalysis@edgesTable)
  }
  return(AFMImageNetworksAnalysis)
}

#' identify isolated nodes comparing the list of edges and the list of nodes
#'
#' @param AFMImageNetworksAnalysis the AFMImageNetworksAnalysis instance
#' @return the updated instance of AFMImageNetworksAnalysis
#' @export
#' @author M.Beauvais
identifyIsolatedNodes<-function(AFMImageNetworksAnalysis) {
  if (!(is.list(AFMImageNetworksAnalysis@circlesTable) & length(AFMImageNetworksAnalysis@circlesTable) == 0)) {
    isolates<-getVertexId(AFMImageNetworksAnalysis@binaryAFMImage,
                          AFMImageNetworksAnalysis@circlesTable$lon,
                          AFMImageNetworksAnalysis@circlesTable$lat)
    print(isolates)
    vedges<-AFMImageNetworksAnalysis@edgesTable
    AFMImageNetworksAnalysis@isolatedNodesList<-isolates[!isolates %in% vedges$from & !isolates %in% vedges$to]
  }else{
    warning("no treatment - no circle identified")
  }
  return(AFMImageNetworksAnalysis)
}

# AFMImageNetworksAnalysis<-identifyNodesWithCircles(AFMImageNetworksAnalysis= AFMImageNetworksAnalysis)
# AFMImageNetworksAnalysis<-identifyEdgesFromCircles(AFMImageNetworksAnalysis= AFMImageNetworksAnalysis)
# AFMImageNetworksAnalysis<-identifyIsolatedNodes(AFMImageNetworksAnalysis)
# AFMImageNetworksAnalysis<-getEdgesAfterNodesFusion(AFMImageNetworksAnalysis)

#' calculate the physical distances between nodes
#'
#' @param pathVidVector a network path
#' @param hscale the hscale of the \code{\link{AFMImage}} from Atomic Force Microscopy
#' @param vscale the vscale of the \code{\link{AFMImage}} from Atomic Force Microscopy
#' @return the physical distance between the extremities of the path
#' @export
#' @author M.Beauvais
calculatePhysicalDistanceFromPath<-function(pathVidVector, hscale, vscale) {
  physicalDistance<-0
  vid1<-pathVidVector[1]
  for (pathInd in seq(2, length(pathVidVector))) {
    vid2<-pathVidVector[pathInd]
    vid1Coords<-getCoordinatesFromVertexId(vid1)
    vid2Coords<-getCoordinatesFromVertexId(vid2)
    physicalDistance<-physicalDistance+sqrt((hscale*(vid1Coords$coords.x1-vid2Coords$coords.x1))^2+(vscale*(vid1Coords$coords.x2-vid2Coords$coords.x2))^2)
    vid1<-pathVidVector[pathInd]
  }
  return(physicalDistance)
}
# TODO check that strsplit returns results
#path<-strsplit(directedConnectedNodesDT[1,]$shortest_path,"-")[[1]]
#calculatePhysicalDistanceFromPath(path, hscale, vscale)

#' create the igraph weighted graph from the nodes and edges
#'
#' @param AFMImageNetworksAnalysis a \code{\link{AFMImageNetworksAnalysis}}
#' @export
#' @author M.Beauvais
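#' @examples
#' \dontrun{
#' # minimal sketch, assuming "networksAnalysis" is an AFMImageNetworksAnalysis
#' # whose circlesTable, edgesTable and isolatedNodesList slots were already
#' # filled by the identification steps above (hypothetical object name):
#' networksAnalysis<-createGraph(networksAnalysis)
#' igraph::vcount(networksAnalysis@skeletonGraph)
#' }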
createGraph<-function(AFMImageNetworksAnalysis) { keep<-NULL if (!(is.list(AFMImageNetworksAnalysis@circlesTable) & length(AFMImageNetworksAnalysis@circlesTable) == 0)) { isolatedNodesList<-AFMImageNetworksAnalysis@isolatedNodesList from<-to<-NULL ultimateNetwork<-copy(AFMImageNetworksAnalysis@edgesTable[keep %in% c(TRUE),]) isolates<-isolatedNodesList[!isolatedNodesList %in% ultimateNetwork$from & !isolatedNodesList %in% ultimateNetwork$to] totalVerticesNumber<-length(unique(c(ultimateNetwork$from, ultimateNetwork$to, isolates))) print(paste("totalVerticesNumber:",totalVerticesNumber)) listOfVertices<-unique(c(ultimateNetwork$from, ultimateNetwork$to, isolates)) names(listOfVertices)<-unique(c(ultimateNetwork$from, ultimateNetwork$to, isolates)) alledges2<-as.vector(t(matrix(c(ultimateNetwork$from,ultimateNetwork$to),ncol=2))) g<-graph(edges=alledges2, directed=FALSE, isolates=isolates) E(g)$weight <-ultimateNetwork$dist AFMImageNetworksAnalysis@skeletonGraph<-g ultimateNetwork<-copy(AFMImageNetworksAnalysis@edgesTable) isolates<-isolatedNodesList[!isolatedNodesList %in% ultimateNetwork$from & !isolatedNodesList %in% ultimateNetwork$to] listOfVertices<-unique(c(ultimateNetwork$from, ultimateNetwork$to, isolates)) names(listOfVertices)<-unique(c(ultimateNetwork$from, ultimateNetwork$to, isolates)) alledges2<-as.vector(t(matrix(c(ultimateNetwork$from,ultimateNetwork$to),ncol=2))) g<-graph(edges=alledges2, directed=FALSE, isolates=isolates) E(g)$weight <-ultimateNetwork$dist AFMImageNetworksAnalysis@originalGraph<-g }else{ warning("no treatment - no circle identified") } return(AFMImageNetworksAnalysis) } #' calculate the shortest path between adjacent nodes #' #' Calculate the shortest path between all nodes of degree different to 2 #' that are connected with nodes of degree equal to 2 #' Calculate the distance between the above nodes. #' #' @param ... 
cl: a cluster object from the parallel package
#' @param AFMImageNetworksAnalysis a \code{\link{AFMImageNetworksAnalysis}}
#' @export
#' @author M.Beauvais
calculateShortestPaths<-function(...,AFMImageNetworksAnalysis) {
  force(AFMImageNetworksAnalysis)

  if ((is.list(AFMImageNetworksAnalysis@circlesTable) & length(AFMImageNetworksAnalysis@circlesTable) == 0)) {
    warning("no treatment - no circle identified")
  }else{
    args<-names(list(...))
    print(args)
    if (is.null(args)||is.na(match('cl',args))) {
      clExist<-FALSE
    }else{
      clExist<-TRUE
      cl<-list(...)[['cl']]
    }

    if (clExist) {
      print("using parallel")
      requireNamespace("parallel")
    }else {
      print("no parallel")
    }

    workerFunc <- function(vid1index, g, hscale, vscale, nodesAnalysisDT) {
      requireNamespace("igraph")
      requireNamespace("data.table")
      requireNamespace("AFM")

      directedConnectedNodesDT<-data.table(vid1=c(""), vid1NodeDegree= c(0),
                                           vid2=c(""), vid2NodeDegree= c(0),
                                           numberOfNodesInShortestPath=c(0),
                                           shortest_path=c(""),
                                           physicalDistance=c(0))
      tryCatch({
        nb<-0
        print(paste0(vid1index," / ", nrow(nodesAnalysisDT)))
        #TODO calculate distance matrix
        # vid2index by distance
        # when nbOfShortestPath for vid1 reaches the degree of the node then break
        nbOfShortestPath<-0
        vid1Node<-nodesAnalysisDT[vid1index,]
        vid1<- vid1Node$vid
        vid1NodeDegree<-vid1Node$node_degree

        for (vid2index in seq(1, nrow(nodesAnalysisDT))) {
          #print(paste0(vid1index," / ", nrow(nodesAnalysisDT),"-",vid2index))
          vid2Node<-nodesAnalysisDT[vid2index,]
          vid2<- vid2Node$vid
          vid2NodeDegree<-vid2Node$node_degree

          if (!vid1 %in% vid2) {
            allPath<-all_shortest_paths(g, vid1, vid2)
            #print(allPath)
            #print(is.null(allPath$res))
            if (length(allPath$res)>0) {
              for(pathIndex in seq(1,length(allPath$res))) {
                path<-allPath$res[[pathIndex]]$name
                #TODO is it working if two points are in separate graph ?
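                # a shortest path is retained only if none of its intermediary
                # vertices is itself a junction or terminal node (nodesAnalysisDT
                # holds the vertices of degree != 2), i.e. the path follows a
                # single skeleton segment between two remarkable nodes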
                numberOfNodesInShortestPath<-length(path)
                #print(allPath$res[[1]]$name %in% nodesAnalysisDT$vid)
                #all(nodesAnalysisDT[path[2:(length(path)-1)],]$node_degree)
                # are all the intermediary nodes of degree equal to 2 ?
                #numberOfNodesInShortestPath<-length(which(path %in% nodesAnalysisDT$vid == TRUE))
                if (all(path[2:(length(path)-1)] %in% nodesAnalysisDT$vid == FALSE)) {
                  print(c("interesting", vid1, vid2))
                  print(path)
                  #nbOfShortestPath<-nbOfShortestPath+1
                  physicalDistance<-calculatePhysicalDistanceFromPath(path, hscale, vscale)
                  print(physicalDistance)
                  #totalPhysicalDistance<-totalPhysicalDistance+physicalDistance
                  # TODO fill with reverse path
                  directedConnectedNodesDT<-rbindlist(list(directedConnectedNodesDT,
                                                           data.table(vid1=vid1,
                                                                      vid1NodeDegree=vid1NodeDegree,
                                                                      vid2=vid2,
                                                                      vid2NodeDegree = vid2NodeDegree,
                                                                      numberOfNodesInShortestPath=numberOfNodesInShortestPath,
                                                                      shortest_path=paste0(path, collapse = "-"),
                                                                      physicalDistance=physicalDistance)))
                  print("all")
                }
              }
            }
          }
        }
      }, error = function(e) {
        print(paste("error in workerFunc:", e))
      })
      return(directedConnectedNodesDT[-1,])
    }

    # start
    hscale<-AFMImageNetworksAnalysis@binaryAFMImage@hscansize/AFMImageNetworksAnalysis@binaryAFMImage@samplesperline
    vscale<-AFMImageNetworksAnalysis@binaryAFMImage@vscansize/AFMImageNetworksAnalysis@binaryAFMImage@lines
    g<-AFMImageNetworksAnalysis@skeletonGraph
    node_degree<-NULL
    verticesAnalysisDT<-data.table(vid=V(g)$name, node_degree=unname(degree(g)))
    nodesAnalysisDT<-copy(verticesAnalysisDT[node_degree!=2 & node_degree!=0,])

    values <- seq(1, nrow(nodesAnalysisDT))
    print("calculating shortest paths")
    print(paste(nrow(nodesAnalysisDT), "calls", nrow(nodesAnalysisDT)^2,"loops"))
    start.time <- Sys.time()
    print(start.time)
    if (clExist) {
      parallel::clusterEvalQ(cl , c(library("data.table"),library("igraph"), library("AFM")))
      parallel::clusterExport(cl, c("g","hscale", "vscale", "nodesAnalysisDT"),envir=environment())
      res <- parallel::parLapply(cl, values, workerFunc, g, hscale, vscale, nodesAnalysisDT)
    }else{
      res <- lapply(values, workerFunc, g, hscale, vscale, nodesAnalysisDT)
    }
    end.time <- Sys.time()
    time.taken <- end.time - start.time
    print(paste0("time.taken: ",time.taken))

    directedConnectedNodesDT<-rbindlist(res)
    AFMImageNetworksAnalysis@shortestPaths<-directedConnectedNodesDT
  }
  return(AFMImageNetworksAnalysis)
}

#' calculate the network parameters
#'
#' Calculate and return the network parameters
#'
#' @param AFMImageNetworksAnalysis a \code{\link{AFMImageNetworksAnalysis}}
#' @param AFMImage a \code{\link{AFMImage}}
#' @return a data.table with all the parameters
#' @export
#' @author M.Beauvais
calculateNetworkParameters<-function(AFMImageNetworksAnalysis, AFMImage) {
  vid<-node_degree<-hist<-physicalDistance<-vid1NodeDegree<-vid2NodeDegree<-NULL

  if ((is.list(AFMImageNetworksAnalysis@circlesTable) & length(AFMImageNetworksAnalysis@circlesTable) == 0)) {
    warning("no treatment - no circle identified")
  }else{
    # samplename<-basename(AFMImageNetworksAnalysis@binaryAFMImage@fullfilename)
    g<-AFMImageNetworksAnalysis@skeletonGraph
    verticesAnalysisDT<-data.table(vid=V(g)$name, node_degree=unname(degree(g)))
    # verticesAnalysisDT
    # nrow(verticesAnalysisDT)
    # nrow(verticesAnalysisDT[node_degree>4])
    # nrow(verticesAnalysisDT[node_degree==1])
    param<-getRoughnessParameters(AFMImage)
    param
    numberOfNodesPerArea<-(nrow(verticesAnalysisDT[node_degree!=2,]))/param$area
    numberOfNodesPerSurfaceArea<-(nrow(verticesAnalysisDT[node_degree!=2,]))/param$surfaceArea

    ggplot(verticesAnalysisDT, aes(node_degree)) +
      geom_histogram( binwidth=1, fill=NA, color="black") +
      theme_bw() #nicer looking

    directedConnectedNodesDT<-AFMImageNetworksAnalysis@shortestPaths
    directedConnectedNodesDT
    mean(directedConnectedNodesDT$physicalDistance)
    hist(directedConnectedNodesDT$physicalDistance)
    ggplot(directedConnectedNodesDT, aes(physicalDistance)) +
      geom_histogram( binwidth=50, fill=NA, color="black") +
      theme_bw() #nicer looking
    max(directedConnectedNodesDT$physicalDistance)

    # Total number of nodes
    totalNumberOfNodes<-nrow(verticesAnalysisDT[node_degree!=2,])
    # Number of nodes with degree > 2
    totalNumberOfNodesWithDegreeThreeOrMorePerArea<-nrow(verticesAnalysisDT[node_degree>2,])/param$area
    # Number of nodes with degree = 1
    totalNumberOfNodesWithDegreeOnePerArea<-nrow(verticesAnalysisDT[node_degree==1,])/param$area
    # Number of isolated nodes
    NumberOfIsolatedNodesPerArea<-length(AFMImageNetworksAnalysis@isolatedNodesList)/param$area
    # Surface
    area<-param$area
    # Surface area of a grid of heights
    surfaceArea<-param$surfaceArea
    # Nodes (degree>2 or =1) / area
    numberOfNodesPerArea<-(nrow(verticesAnalysisDT[node_degree!=2,]))/param$area
    # Nodes (degree>2 or =1) / surface area
    numberOfNodesPerSurfaceArea<-(nrow(verticesAnalysisDT[node_degree!=2,]))/param$surfaceArea
    # Mean physical distance between nodes (degree!=2)
    MeanPhysicalDistanceBetweenNodes<-mean(directedConnectedNodesDT$physicalDistance)

    # the error handler of tryCatch runs in its own environment, so the fallback
    # value has to be the return value of tryCatch itself
    # distance between highly connected nodes and terminal nodes
    MeanPhysicalDistanceToTerminalNodes<-tryCatch(
      mean(directedConnectedNodesDT[(vid1NodeDegree>2&vid2NodeDegree==1),]$physicalDistance),
      error=function(cond) NA)
    # distance between highly connected nodes
    MeanPhysicalDistanceBetweenHighlyConnectedNodes<-tryCatch(
      mean(directedConnectedNodesDT[(vid1NodeDegree>2&vid2NodeDegree>2),]$physicalDistance),
      error=function(cond) NA)
    # distance between terminal nodes
    MeanPhysicalDistanceBetweenTerminalNodes<-tryCatch(
      mean(directedConnectedNodesDT[(vid1NodeDegree==1&vid2NodeDegree==1),]$physicalDistance),
      error=function(cond) NA)

    # Mean physical size of nodes
    MeanPhysicalSizeOfHighlyConnectedNodes<-mean(AFMImageNetworksAnalysis@circlesTable[vid %in% verticesAnalysisDT[node_degree>2,]$vid,]$circleRadius)
    SDPhysicalSizeOfHighlyConnectedNodes<-sd(AFMImageNetworksAnalysis@circlesTable[vid %in% verticesAnalysisDT[node_degree>2,]$vid,]$circleRadius)
    MeanPhysicalSizeOfTerminalNodes<-mean(AFMImageNetworksAnalysis@circlesTable[vid %in% verticesAnalysisDT[node_degree==1,]$vid,]$circleRadius)
    SDPhysicalSizeOfTerminalNodes<-sd(AFMImageNetworksAnalysis@circlesTable[vid %in% verticesAnalysisDT[node_degree==1,]$vid,]$circleRadius)

    # graph density
    print("calculating graph density")
    graphDensity<-graph.density(g)
    print(graphDensity)
    # global clustering coefficient: (closed triplets/all triplets)
    graphTransitivity<-transitivity(g, type="global")
    edgeConnectivity<-edge.connectivity(g)
    # same as graph adhesion
    graphAdhesion=graph.adhesion(g)
    # diameter of the graph
    graphDiameter<-max(directedConnectedNodesDT$physicalDistance)
    # reciprocity of the graph
    graphReciprocity<-reciprocity(g)
    # number of islands
    NumberOfIslands<-clusters(g)$no
    NumberOfIslandsPerArea<-clusters(g)$no/param$area

    # in sociology theory:
    # gate keepers: low Eigenvector centrality and high Betweenness centrality;
    # contact with important nodes: high Eigenvector centrality and low Betweenness centrality
    graphEvcent<-evcent(g)$vector
    graphBetweenness<-betweenness(g)
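    # both centrality vectors are stored on the analysis object below; as a
    # hypothetical usage sketch, the strongest "gate keeper" candidates could
    # then be listed afterwards with something like:
    #   head(sort(AFMImageNetworksAnalysis@graphBetweenness, decreasing=TRUE))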
#displaygridIgraphPlot(AFMImageNetworksAnalysis) # paramDT<-data.table( # area=area, # surfaceArea=surfaceArea) # paramDT resultDT=data.table(NumberOfIslandsPerArea=NumberOfIslandsPerArea, NumberOfIsolatedNodesPerArea=NumberOfIsolatedNodesPerArea, totalNumberOfNodes=totalNumberOfNodes, totalNumberOfNodesWithDegreeThreeOrMorePerArea=totalNumberOfNodesWithDegreeThreeOrMorePerArea, totalNumberOfNodesWithDegreeOnePerArea=totalNumberOfNodesWithDegreeOnePerArea, totalNumberOfNodesPerArea=numberOfNodesPerArea, totalNumberOfNodesPerSurfaceArea=numberOfNodesPerSurfaceArea, MeanPhysicalDistanceBetweenNodes=MeanPhysicalDistanceBetweenNodes, MeanPhysicalDistanceToTerminalNodes=MeanPhysicalDistanceToTerminalNodes, MeanPhysicalDistanceBetweenHighlyConnectedNodes=MeanPhysicalDistanceBetweenHighlyConnectedNodes, MeanPhysicalDistanceBetweenTerminalNodes=MeanPhysicalDistanceBetweenTerminalNodes, MeanPhysicalSizeOfHighlyConnectedNodes=MeanPhysicalSizeOfHighlyConnectedNodes, SDPhysicalSizeOfHighlyConnectedNodes=SDPhysicalSizeOfHighlyConnectedNodes, MeanPhysicalSizeOfTerminalNodes=MeanPhysicalSizeOfTerminalNodes, SDPhysicalSizeOfTerminalNodes=SDPhysicalSizeOfTerminalNodes, graphDiameter=graphDiameter, graphDensity=graphDensity, graphTransitivity=graphTransitivity, edgeConnectivity=edgeConnectivity, graphAdhesion=graphAdhesion, graphReciprocity=graphReciprocity) #resultDT AFMImageNetworksAnalysis@networksCharacteristics<-resultDT AFMImageNetworksAnalysis@graphEvcent<-graphEvcent AFMImageNetworksAnalysis@graphBetweenness<-graphBetweenness } return(AFMImageNetworksAnalysis) } #' get the networks parameters #' #' Calculate the holes characteristics #' #' @param AFMImageNetworksAnalysis a \code{\link{AFMImageNetworksAnalysis}} #' @return a data.table with all the parameters #' @export #' @author M.Beauvais calculateHolesCharacteristics<-function(AFMImageNetworksAnalysis) { # holes statistics holesIslandsDT<-getHolesStatistics(AFMImageNetworksAnalysis@binaryAFMImage) #numberOfHoles<-unique(holesIslandsDT$cluster) holeStats<-holesIslandsDT[,.N,by="cluster"] AFMImageNetworksAnalysis@holes<-holesIslandsDT AFMImageNetworksAnalysis@holesCharacteristics<-holeStats return(AFMImageNetworksAnalysis) } #' removeNode #' #' remove a node from an AFMImage #' #' @param circleAFMImage a \code{\link{AFMImage}} #' @param nodeDT a data.table lon lat circleRadius #' @return an \code{\link{AFMImage}} #' @author M.Beauvais removeNode<-function(circleAFMImage, nodeDT) { #print(paste("removing",nrow(nodeDT), "nodes")) for (i in seq(1, nrow(nodeDT))){ circleRadius<-nodeDT[i,]$circleRadius center<-c(nodeDT[i,]$lat, nodeDT[i,]$lon) circleRadius2=circleRadius #+BIGGER_CIRCLE_RADIUS blockSize2=circleRadius2+1 #*BIGGER_CIRCLE_RADIUS_MULTILPLIER+1 circleCenter<-c(circleRadius2, circleRadius2) circlePts = SpatialPoints(cbind(rep(1:(blockSize2),blockSize2), rep(1:(blockSize2),1,each= blockSize2))) circlenm <- sp::spDistsN1(pts=circlePts, pt=circleCenter, longlat=FALSE) pts = SpatialPoints(cbind(rep(0:(blockSize2-1),blockSize2)+center[1]-circleRadius2, rep(0:(blockSize2-1),1,each= blockSize2)+center[2]-circleRadius2)) pts<-pts[pts$coords.x1>0&pts$coords.x1<circleAFMImage@lines&pts$coords.x2>0&pts$coords.x2<circleAFMImage@samplesperline] nm <- sp::spDistsN1(pts=pts, pt=center, longlat=FALSE) listOfPointsInsideCircle<-pts[nm<=circleRadius] #circleAFMImage@data$h[listOfPointsInsideCircle$coords.x2+1+(listOfPointsInsideCircle$coords.x1)*circleAFMImage@samplesperline]<-0 
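    # the disc computed above is erased in one vectorized assignment, using the
    # row-major mapping of pixel (coords.x1, coords.x2) to the index
    # coords.x1+1+coords.x2*samplesperline of the h vector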
    circleAFMImage@data$h[listOfPointsInsideCircle$coords.x1+1+(listOfPointsInsideCircle$coords.x2)*circleAFMImage@samplesperline]<-0
  }
  return(circleAFMImage)
}

#' addNode
#'
#' add a node to an AFMImage
#'
#' @param circleAFMImage a \code{\link{AFMImage}}
#' @param nodeDT a data.table with lon, lat and circleRadius columns
#' @param filterIndex an integer
#' @return an \code{\link{AFMImage}}
#' @author M.Beauvais
addNode<-function(circleAFMImage, nodeDT,filterIndex) {
  #print(paste("adding",nrow(nodeDT), "nodes"))
  for (i in seq(1, nrow(nodeDT))){
    circleRadius<-nodeDT[i,]$circleRadius
    center<-c(nodeDT[i,]$lon, nodeDT[i,]$lat)

    circleRadius2=circleRadius+BIGGER_CIRCLE_RADIUS
    blockSize2=circleRadius2*BIGGER_CIRCLE_RADIUS_MULTILPLIER+1

    circleCenter<-c(circleRadius2, circleRadius2)
    circlePts = SpatialPoints(cbind(rep(1:(blockSize2),blockSize2),
                                    rep(1:(blockSize2),1,each= blockSize2)))
    circlenm <- sp::spDistsN1(pts=circlePts, pt=circleCenter, longlat=FALSE)

    pts = SpatialPoints(cbind(rep(0:(blockSize2-1),blockSize2)+center[1]-circleRadius2,
                              rep(0:(blockSize2-1),1,each= blockSize2)+center[2]-circleRadius2))
    pts<-pts[pts$coords.x1>0&pts$coords.x1<circleAFMImage@lines&pts$coords.x2>0&pts$coords.x2<circleAFMImage@samplesperline]
    nm <- sp::spDistsN1(pts=pts, pt=center, longlat=FALSE)
    listOfPointsInsideCircle<-pts[nm<=circleRadius]
    circleAFMImage@data$h[listOfPointsInsideCircle$coords.x1+1+(listOfPointsInsideCircle$coords.x2)*circleAFMImage@samplesperline]<-circleAFMImage@samplesperline+filterIndex*10
  }
  return(circleAFMImage)
}

#' removeLonguestEdge
#'
#' Find and remove the longest edge of a triangle if it is unique
#'
#' @param k an integer, the index of the triangle side to examine
#' @param res a data.table of edges (from, to) to be removed
#' @param sides a data.table of the edges between the neighbors of vertex1
#' @param myRes a data.table of the edges incident to vertex1
#' @param vertex1 a vertex id
#' @return a data.table with from, to
#' @author M.Beauvais
removeLonguestEdge<-function(k, res, sides, myRes, vertex1) {
  from<-to<-NULL
  thirdSide<-sides[k,]
  thirdSide
  secondSide <- myRes[(from %in% c(thirdSide$from) & to %in% c(vertex1)) | (to %in% c(thirdSide$from)&(from %in% c(vertex1))),]
  secondSide
  firstSide <- myRes[(from %in% c(thirdSide$to) & to %in% c(vertex1)) | (to %in% c(thirdSide$to)&(from %in% c(vertex1))),]
  firstSide
  distV<-rbind(firstSide, secondSide, thirdSide)
  if (nrow(distV)<3) return(res[-1,])
  # print(distV)
  maxDistV<-which.max(distV$dist)
  maxDistV
  #print(distV[maxDistV,])
  if(length(unique(distV$dist))>1) {
    maxDistVal<-distV[maxDistV,]$dist
    if (nrow(distV[dist %in% maxDistVal,])==1) {
      res<-rbind(res,data.table(distV[maxDistV,c("from","to"),with=FALSE]))
    }
  }
  return(res[-1,])
}

#' getMaxCircleMatrix
#'
#' For each non-empty pixel of the image, try to place one circle centered
#' on that pixel, starting with the biggest circle; as soon as a fitting
#' circle is found, the pixel is associated with the circle radius.
#'
#' @param ... cl: a cluster object from the parallel package
#' @param newCircleAFMImage a \code{\link{AFMImage}}
#' @param CIRCLE_RADIUS_INIT the initial (maximum) circle radius to try
#' @return res a matrix of the maximum circle radius found for each pixel
#' @export
#' @author M.Beauvais
getMaxCircleMatrix<-function(..., newCircleAFMImage, CIRCLE_RADIUS_INIT) {
  x<-y<-NULL
  args<-names(list(...))
  print(args)
  if (is.null(args)) {
    print("not using parallel for getMaxCircleMatrix")
    clExist<-FALSE
  }else{
    print("using parallel for getMaxCircleMatrix")
    clExist<-!is.na(match('cl',args))
    if (clExist) cl<-list(...)[['cl']]
  }

  binaryAFMImageMatrix<-matrix(newCircleAFMImage@data$h, ncol=newCircleAFMImage@samplesperline)
  maxCircleRadiusMatrix<-matrix(data=rep(0,newCircleAFMImage@samplesperline*newCircleAFMImage@lines),
                                nrow=newCircleAFMImage@lines,
                                ncol=newCircleAFMImage@samplesperline)
  initialAllXY<-data.table(which(binaryAFMImageMatrix!=0,arr.ind = T))
  colnames(initialAllXY)<-c("x","y")
  setkey(initialAllXY, x)
  initialAllXY$x<-as.numeric(initialAllXY$x)
  initialAllXY$y<-as.numeric(initialAllXY$y)
  #matrixElementsDT<-data.table(x=c(0),y=c(0),radius=c(0))
  if (exists("matrixElementsDT")) rm(matrixElementsDT)
  circleRadius<-CIRCLE_RADIUS_INIT
  #circleRadius<-4
  iteration<-0
  #rm(avgDT)
  start.time <- Sys.time()
  print(start.time)
  while(circleRadius>1) {
    iteration=iteration+1
    circleRadius=circleRadius-1
    blockSize<-circleRadius*2+1
    allXY <- copy(initialAllXY[x<=newCircleAFMImage@samplesperline-blockSize&y<=newCircleAFMImage@lines-blockSize])
    print(paste0("circleRadius:",circleRadius))
    print(paste0(nrow(allXY)," loops"))
    if ((blockSize>newCircleAFMImage@samplesperline)|((blockSize-1)>newCircleAFMImage@lines)) {
      print(paste0("too big blockSize: ", blockSize))
    }else{
      circleCenter<-c(circleRadius, circleRadius)
      circlePts = sp::SpatialPoints(cbind(rep(1:(blockSize),blockSize),
                                          rep(1:(blockSize),1,each= blockSize)))
      circlenm <- sp::spDistsN1(pts = circlePts, pt = circleCenter, longlat=FALSE)
      if(clExist) {
        #cl<-cl
        parallel::clusterEvalQ(cl , c(library("data.table"),library("sp"), library("AFM"),library("parallel")))
        parallel::clusterExport(cl, c("allXY","newCircleAFMImage","binaryAFMImageMatrix","maxCircleRadiusMatrix","circleRadius","circlenm"), envir=environment())
        matrixElements<-parallel::parLapply(cl, 1:nrow(allXY),identifyMaxCircleRadius, allXY, newCircleAFMImage, binaryAFMImageMatrix,maxCircleRadiusMatrix,circleRadius,circlenm)
      }else{
        matrixElements<-lapply(1:nrow(allXY),identifyMaxCircleRadius, allXY, newCircleAFMImage, binaryAFMImageMatrix,maxCircleRadiusMatrix,circleRadius,circlenm)
      }
      if (!exists("matrixElementsDT")) matrixElementsDT<-rbindlist(matrixElements)
      else matrixElementsDT<-rbind(matrixElementsDT, rbindlist(matrixElements))
      #print(matrixElementsDT)
      # setkeyv(allXY, c("x","y"))
      # setkeyv(matrixElementsDT, c("x","y"))
      initialAllXY<-data.table::fsetdiff(x = initialAllXY, y=matrixElementsDT[,1:2,])
      print(paste("elements left: ", nrow(initialAllXY)))
    }
  }
  end.time <- Sys.time()
  print(paste0("start.time: ",start.time))
  print(paste0("end.time: ",end.time))
  time.taken <- end.time - start.time
  print(paste0("time.taken: ",time.taken))

  missingX<-setdiff(seq(1,newCircleAFMImage@samplesperline),unique(matrixElementsDT$x))
  missingY<-setdiff(seq(1,newCircleAFMImage@lines),unique(matrixElementsDT$y))
  matrixElementsDT<-rbind(matrixElementsDT,
                          data.table(x=c(missingX,rep(1, length(missingY))),
                                     y=c(rep(1, length(missingX)), missingY),
                                     radius=c(rep(0, length(missingX)+length(missingY)))))
  res <- as.matrix(dcast.data.table(data=matrixElementsDT, x ~ y, value.var="radius", fun.aggregate = max, fill=0)[,-1, with=FALSE])
  # res
  # max(res)
  return(res)
}

#' simplifyNetwork
#'
#' simplify the network keeping only the important edges
#'
#' @param ... cl: a cluster object from the parallel package
#' @param allVertices a data.table of vertices
#' @param allEdges a data.table of edges
#' @return a data.table of edges
#' @export
#' @author M.Beauvais
simplifyNetwork<-function(..., allVertices, allEdges){
  from<-to<-fromto<-NULL
  args<-names(list(...))
  print(args)
  if (is.null(args)) {
    clExist<-FALSE
  }else{
    clExist<-!is.na(match('cl',args))
    if (clExist) cl<-list(...)[['cl']]
  }
  if (clExist) {
    print("using parallel")
    requireNamespace("parallel")
  }
  allVertices<-as.character(allVertices)
  allEdges$from<-as.character(allEdges$from)
  allEdges$to<-as.character(allEdges$to)

  # find triangles in the network and eliminate the longest edge
  # allVertices a vector of Vertices
  # allEdges a data.table with following columns: from, to, dist
  findTriangleAndEdgeToEliminate<-function(j,allVertices, allEdges) {
    requireNamespace("data.table")
    res<-data.table(from=c(0),to=c(0))
    vertex1<-allVertices[j]
    #print(paste(j, vertex1))
    # if (vertex1 %in% c(10485790) | vertex1 %in% c(11796515)) {
    #   print("problem1")
    # }
    myRes<-allEdges[from %in% vertex1 | to %in% vertex1, ]
    # all nodes linked to Vertex
    allNodeLinkToVertex<-unique(c(myRes$from, myRes$to))
    allNodeLinkToVertex<-allNodeLinkToVertex[! allNodeLinkToVertex %in% vertex1]
    sides<-allEdges[from %in% allNodeLinkToVertex & to %in% allNodeLinkToVertex,]
    # if (nrow(sides[from %in% c(9699366) | to %in% c(9699366),])>0) {
    #   #53: 9699368 9699366 to 38 37 40 37 2.000000 TRUE 1 1
    #   print("Problem2")
    # }
    #finalRes<-lapply(1:1,removeLonguestEdge,res, sides, myRes, vertex1)
    finalRes<-lapply(1:nrow(sides),removeLonguestEdge,res, sides, myRes, vertex1)
    finalRes<-rbindlist(finalRes)
    # if (vertex1 %in% c(10485790) | vertex1 %in% c(11796515)) {
    #   print("problem3")
    # }
    finalRes<-unique(finalRes)
    return(finalRes)
  }

  start.time <- Sys.time()
  print(start.time)
  if(clExist) {
    parallel::clusterEvalQ(cl , c(library("data.table")))
    parallel::clusterExport(cl, c("allVertices", "allEdges"), envir=environment())
    resRemoveEdge<-parallel::parLapply(cl, 1:length(allVertices),findTriangleAndEdgeToEliminate , allVertices=allVertices, allEdges=allEdges)
  }else{
    resRemoveEdge<-lapply(1:length(allVertices),findTriangleAndEdgeToEliminate , allVertices=allVertices, allEdges=allEdges)
  }
  end.time <- Sys.time()
  time.taken <- end.time - start.time
  print(paste0("time.taken: ",time.taken))

  resRemoveEdge<-rbindlist(resRemoveEdge)
  resRemoveEdge$fromto<-paste0(resRemoveEdge$from,"-",resRemoveEdge$to)
  setkey(resRemoveEdge, fromto)
  resRemoveEdge<-unique(resRemoveEdge)
  resRemoveEdge$fromto<-NULL
  print("resRemoveEdge")
  #print(resRemoveEdge)
  allEdges$keep<-rep(TRUE, nrow(allEdges))
  allEdges$remove<-rep(FALSE, nrow(allEdges))
  indexOfEdgeToBeRemoved<-1
  for(indexOfEdgeToBeRemoved in seq(1,nrow(resRemoveEdge))) {
    allEdges[(allEdges$from %in% c(resRemoveEdge[indexOfEdgeToBeRemoved,]$from) & allEdges$to %in% c(resRemoveEdge[indexOfEdgeToBeRemoved,]$to)),]$remove<-TRUE
    allEdges[(allEdges$from %in% c(resRemoveEdge[indexOfEdgeToBeRemoved,]$from) & allEdges$to %in% c(resRemoveEdge[indexOfEdgeToBeRemoved,]$to)),]$keep<-FALSE
  }
  print("end simplifyNetwork")
  return(allEdges)
}

#' generatePolygonEnvelope
#'
#' generate a convex polygon from circles
#'
#' @param AFMImageNetworksAnalysis a \code{\link{AFMImageNetworksAnalysis}}
#' @param centers a matrix of circle centers, one row per circle
#' @param radius a vector of radii
#' @return a polygon
#' @export
#' @author M.Beauvais
generatePolygonEnvelope<-function(AFMImageNetworksAnalysis, centers, radius){
  #chull<-lines<-SpatialPolygons<-Polygons<-SpatialPointsDataFrame<-NULL
  # check if center and radius are in image
  # TBD
  binaryAFMImage<-AFMImageNetworksAnalysis@binaryAFMImage
  x1<-x2<-c()
  #i<-1
  for (i in seq(1, nrow(centers))) {
    circleRadius<-radius[i]
    center<-centers[i,]
    # center<-c(10,30)
    # circleRadius<-9
    if (circleRadius<0) {
      stop("generatePolygonEnvelope - the radius is negative")
    }
    if (circleRadius>0) {
      blockSize<-circleRadius*2+1
      pts = sp::SpatialPoints(cbind(rep(1:blockSize,blockSize)+center[1]-circleRadius-1,
                                    rep(1:blockSize,1,each= blockSize)+center[2]-circleRadius-1))
      #print(pts)
      pts<-pts[pts$coords.x1>0&pts$coords.x1<binaryAFMImage@lines&pts$coords.x2>0&pts$coords.x2<binaryAFMImage@samplesperline]
      #plot(pts)
      nm <- sp::spDistsN1(pts = matrix(c(pts$coords.x1, pts$coords.x2), ncol=2), pt=c(center[1], center[2]), longlat=FALSE)
      #print(nm)
      centerAllpoints<-pts[nm<=circleRadius]
      #plot(centerAllpoints)
      centerAllpoints<-SpatialPoints(cbind(
        c(centerAllpoints$coords.x1, center[1]),
        c(centerAllpoints$coords.x2, center[2])
      ))
      x1<-c(x1,centerAllpoints$coords.x1)
      x2<-c(x2,centerAllpoints$coords.x2)
      # X<-cbind(x1,x2)
      # plot(X, cex = 0.5)
    }else{
      # circleRadius == 0
      centerAllpoints<-sp::SpatialPoints(cbind(center$lon, center$lat))
      x1<-c(x1,center[2])
      x2<-c(x2,center[1])
    }
  }
  X<-cbind(x2,x1)
  ch <- chull(X)
  coords <- X[c(ch, ch[1]), ] # closed polygon
  #plot(X, cex = 0.5)
  #lines(coords)
  sp_poly <- sp::SpatialPolygons(list(sp::Polygons(list(sp::Polygon(coords)), ID=1)))
  return(sp_poly)
}
#envelope <- generatePolygonEnvelope(AFMImageNetworksAnalysis, centers, r)

#' getAllPointsToRemove
#'
#' get the points inside envelope
#'
#' @param AFMImageNetworksAnalysis a \code{\link{AFMImageNetworksAnalysis}}
#' @param envelope a SpatialPolygons envelope such as returned by \code{generatePolygonEnvelope}
#' @return a data.table of points
#' @export
#' @author M.Beauvais
getAllPointsToRemove<-function(AFMImageNetworksAnalysis, envelope) {
  #envelopeSR1=Polygons(list(Polygon(envelope$XY)),"r1")
  # sr=SpatialPolygons(list(envelopeSR1))
  sr=envelope
  Lines<-AFMImageNetworksAnalysis@binaryAFMImage@lines
  Samplesperline<-AFMImageNetworksAnalysis@binaryAFMImage@samplesperline
  pts = cbind(rep(seq(1,Samplesperline, by= 1), times = Lines),
              rep(seq(1,Lines, by= 1), each = Samplesperline))
  pts
  dimnames(pts)[[1]] = seq(1,Lines*Samplesperline)
  df = data.frame(a = seq(1,Lines*Samplesperline))
  row.names(df) = seq(1,Lines*Samplesperline)
  #options(warn=1) # show warnings where they occur
  mySP<-sp::SpatialPointsDataFrame(pts, df, match.ID = TRUE) # don't warn
  # retrieve overlay per polygon:
  resOver<-sp::over(x=mySP, y=sr)
  resOver[!is.na(resOver)]
  vId<-as.integer(names(resOver[!is.na(resOver)]))
  HASHSIZE<-Samplesperline
  vertexId<-as.numeric(vId)
  y<-floor(vertexId/HASHSIZE)
  x<-vertexId-y*HASHSIZE
  return(data.table(vId=vId, coords.x1=x,coords.x2=y))
}

#' identifyMaxCircleRadius
#'
#' identifyMaxCircleRadius
#'
#' @param i an integer index into allXY
#' @param allXY a data.table of the candidate (x, y) pixel positions
#' @param newCircleAFMImage a \code{\link{AFMImage}}
#' @param binaryAFMImageMatrix a matrix of the heights of the binary \code{\link{AFMImage}}
#' @param maxCircleRadiusMatrix a matrix of the maximum circle radius already found for each pixel
#' @param circleRadius the radius of the circle to test
#' @param circlenm the distances from each point of the block to the circle center, as computed by \code{sp::spDistsN1}
#' @return a data.table with x, y, radius columns
#' @author M.Beauvais
identifyMaxCircleRadius<-function(i,allXY, newCircleAFMImage, binaryAFMImageMatrix,maxCircleRadiusMatrix,circleRadius,circlenm) {
  x<-allXY[i,]$x
  y<-allXY[i,]$y
  #print (paste(x,y,"center: ",x+circleRadius, y+circleRadius))
  resDT<-data.table(x=c(0),y=c(0),radius=c(0))
  blockSize<-circleRadius*2+1
  if(binaryAFMImageMatrix[x+circleRadius,y+circleRadius]!=0) {
    if(maxCircleRadiusMatrix[x+circleRadius,y+circleRadius]==0) {
      tempMatrix<-binaryAFMImageMatrix[x:(x+blockSize),y:(y+blockSize)]
      if ((!anyNA(as.vector(tempMatrix)[circlenm<=circleRadius]))&
          (all(as.vector(tempMatrix)[circlenm<=circleRadius] == 1) == TRUE)) {
        #print (paste(x,y,"center: ",x+circleRadius, y+circleRadius))
        resDT<-rbind(resDT,data.table(x=c(x+circleRadius),y=c(y+circleRadius),radius=c(circleRadius)))
      }
    }
  }
  return(resDT[-1,])
}
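# Usage sketch (not part of the package API): a minimal, hedged example of
# calling getMaxCircleMatrix() serially and with a parallel cluster.
# `binaryImage` is a hypothetical binarized AFMImage (heights equal to 0 or 1)
# assumed to be prepared by the caller, and 16 is an arbitrary starting radius.
# Wrapped in `if (FALSE)` so it never runs when the file is sourced.
if (FALSE) {
  library(parallel)
  # serial call: nothing is passed through `...`, so no cluster is used
  radiusMatrix <- getMaxCircleMatrix(newCircleAFMImage = binaryImage,
                                     CIRCLE_RADIUS_INIT = 16)
  # parallel call: pass a cluster object as the `cl` argument in `...`
  cl <- makeCluster(2)
  radiusMatrix <- getMaxCircleMatrix(cl = cl,
                                     newCircleAFMImage = binaryImage,
                                     CIRCLE_RADIUS_INIT = 16)
  stopCluster(cl)
}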
require("fftwtools") require("pracma") require("data.table") require("gstat") require(sp) require("stringr") # normality tests require(gridExtra) require(ggplot2) #require(reshape2) require(stats) if(getRversion() >= "3.1.0") utils::suppressForeignCheck(c("r", "roughness","x","predict.gstat")) #' @title AFM Image psd slope analysis #' #' @description \code{AFMImagePSDSlopesAnalysis} stores the analysis of the second slope in roughness against lenghtscale #' #' @slot lc to be removed ? #' @slot wsat to be removed ? #' @slot slope to be removed ? #' @slot yintersept to be removed ? #' @name AFMImagePSDSlopesAnalysis-class #' @rdname AFMImagePSDSlopesAnalysis-class #' @exportClass AFMImagePSDSlopesAnalysis #' @author M.Beauvais AFMImagePSDSlopesAnalysis<-setClass("AFMImagePSDSlopesAnalysis", slots = c(lc="numeric", wsat="numeric", slope="numeric", yintersept="numeric", tangente_point1="numeric", tangente_point2="numeric"), validity = function(object) { return(TRUE) } ) #' Constructor method of AFMImagePSDSlopesAnalysis Class. #' #' @param .Object an AFMImagePSDSlopesAnalysis object #' @rdname AFMImagePSDSlopesAnalysis-class #' @export setMethod("initialize", "AFMImagePSDSlopesAnalysis", function(.Object) { .Object@lc<-0 .Object@wsat<-0 .Object@slope<-0 .Object@yintersept<-0 .Object@tangente_point1<-0 .Object@tangente_point2<-0 validObject(.Object) ## valide l'objet return(.Object) }) #' Wrapper function AFMImagePSDSlopesAnalysis #' #' @rdname AFMImagePSDSlopesAnalysis-class #' @export AFMImagePSDSlopesAnalysis <- function() { return(new("AFMImagePSDSlopesAnalysis")) } #' @title AFM image Power Spectrum Density analysis class #' #' @description \code{AFMImagePSDAnalysis} handles an \code{\link{AFMImage}} roughness against lenghscale analysis #' #' @slot roughnessAgainstLengthscale a data.table to store the roughness against lengthscale data #' @slot intersections a list to store the lengthscales values as the intersections between slopes and the sill in roughness against lenghscale graph #' @slot updateProgress a function to update a graphical user interface #' @name AFMImagePSDAnalysis-class #' @rdname AFMImagePSDAnalysis-class #' @author M.Beauvais AFMImagePSDAnalysis<-setClass("AFMImagePSDAnalysis", slots = c( psd1d_breaks="numeric", psd2d_truncHighLengthScale="logical", psd2d_maxHighLengthScale="numeric", psd1d="data.table", psd2d="data.table", roughnessAgainstLengthscale="data.table", intersections="numeric", AFMImagePSDSlopesAnalysis1="AFMImagePSDSlopesAnalysis", AFMImagePSDSlopesAnalysis2="AFMImagePSDSlopesAnalysis", updateProgress="function"), validity = function(object) { return(TRUE) } ) #' Constructor method of AFMImagePSDAnalysis Class. 
#'
#' @param .Object an AFMImagePSDAnalysis object
#' @rdname AFMImagePSDAnalysis-class
#' @export
setMethod("initialize", "AFMImagePSDAnalysis", function(.Object) {
  .Object@psd1d_breaks<-32
  .Object@psd2d_truncHighLengthScale<-TRUE
  .Object@psd2d_maxHighLengthScale<-0
  .Object@psd1d<-data.table()
  .Object@psd2d<-data.table()
  .Object@roughnessAgainstLengthscale<-data.table()
  validObject(.Object) ## validate the object
  return(.Object)
})

#' Wrapper function AFMImagePSDAnalysis
#'
#' @rdname AFMImagePSDAnalysis-class
#' @export
AFMImagePSDAnalysis <- function() {
  return(new("AFMImagePSDAnalysis"))
}

#' Method \code{psd1d_breaks} returns the number of breaks used to calculate PSD 1D from PSD 2D
#' @name AFMImagePSDAnalysis-class
#' @rdname AFMImagePSDAnalysis-class
setGeneric("psd1d_breaks",function(object){standardGeneric("psd1d_breaks")})
setGeneric(name= "psd1d_breaks<-",
           def= function(AFMImagePSDAnalysis, value) {
             return(standardGeneric("psd1d_breaks<-"))
           })

#' @rdname AFMImagePSDAnalysis-class
#' @aliases psd1d_breaks
#' @param object a \code{\link{AFMImagePSDAnalysis}}
setMethod("psd1d_breaks",signature=signature(object='AFMImagePSDAnalysis'),
          function(object) {
            return(object@psd1d_breaks)
          })
setReplaceMethod(f="psd1d_breaks",
                 signature(AFMImagePSDAnalysis = "AFMImagePSDAnalysis", value = "numeric"),
                 definition= function(AFMImagePSDAnalysis, value) {
                   AFMImagePSDAnalysis@psd1d_breaks <- value
                   return(AFMImagePSDAnalysis)
                 })

#' Method \code{psd2d_maxHighLengthScale} returns the maximum lengthscale to be managed by PSD 2D
#' @name AFMImagePSDAnalysis-class
#' @rdname AFMImagePSDAnalysis-class
setGeneric("psd2d_maxHighLengthScale",function(object){standardGeneric("psd2d_maxHighLengthScale")})
setGeneric(name= "psd2d_maxHighLengthScale<-",
           def= function(AFMImagePSDAnalysis, value) {
             return(standardGeneric("psd2d_maxHighLengthScale<-"))
           })

#' @rdname AFMImagePSDAnalysis-class
#' @aliases psd2d_maxHighLengthScale
setMethod("psd2d_maxHighLengthScale",signature=signature(object='AFMImagePSDAnalysis'),
          function(object) {
            return(object@psd2d_maxHighLengthScale)
          })
setReplaceMethod(f="psd2d_maxHighLengthScale",
                 signature(AFMImagePSDAnalysis = "AFMImagePSDAnalysis", value = "numeric"),
                 definition= function(AFMImagePSDAnalysis, value) {
                   AFMImagePSDAnalysis@psd2d_maxHighLengthScale <- value
                   return(AFMImagePSDAnalysis)
                 })

#' Method \code{psd2d_truncHighLengthScale} returns whether the high lengthscales of PSD 2D should be truncated
#' @name AFMImagePSDAnalysis-class
#' @rdname AFMImagePSDAnalysis-class
setGeneric("psd2d_truncHighLengthScale",function(object){standardGeneric("psd2d_truncHighLengthScale")})
setGeneric(name= "psd2d_truncHighLengthScale<-",
           def= function(AFMImagePSDAnalysis, value) {
             return(standardGeneric("psd2d_truncHighLengthScale<-"))
           })

#' @rdname AFMImagePSDAnalysis-class
#' @aliases psd2d_truncHighLengthScale
setMethod("psd2d_truncHighLengthScale",signature=signature(object='AFMImagePSDAnalysis'),
          function(object) {
            return(object@psd2d_truncHighLengthScale)
          })
setReplaceMethod(f="psd2d_truncHighLengthScale",
                 signature(AFMImagePSDAnalysis = "AFMImagePSDAnalysis", value = "logical"),
                 definition= function(AFMImagePSDAnalysis, value) {
                   AFMImagePSDAnalysis@psd2d_truncHighLengthScale <- value
                   return(AFMImagePSDAnalysis)
                 })

#' Method \code{psd1d} returns a data.table of PSD in 1D
#' @name AFMImagePSDAnalysis-class
#' @rdname AFMImagePSDAnalysis-class
setGeneric("psd1d",function(object){standardGeneric("psd1d")})
setGeneric(name= "psd1d<-",
           def= function(AFMImagePSDAnalysis, value) {
             return(standardGeneric("psd1d<-"))
           })

#' @rdname AFMImagePSDAnalysis-class
#' @aliases psd1d
setMethod("psd1d",signature=signature(object='AFMImagePSDAnalysis'),
          function(object) {
            return(object@psd1d)
          })
setReplaceMethod(f="psd1d",
                 signature(AFMImagePSDAnalysis = "AFMImagePSDAnalysis", value = "data.table"),
                 definition= function(AFMImagePSDAnalysis, value) {
                   AFMImagePSDAnalysis@psd1d <- value
                   return(AFMImagePSDAnalysis)
                 })

#' Method \code{psd2d} returns a data.table of PSD in 2D
#' @name AFMImagePSDAnalysis-class
#' @rdname AFMImagePSDAnalysis-class
setGeneric("psd2d",function(object){standardGeneric("psd2d")})
setGeneric(name= "psd2d<-",
           def= function(AFMImagePSDAnalysis, value) {
             return(standardGeneric("psd2d<-"))
           })

#' @rdname AFMImagePSDAnalysis-class
#' @aliases psd2d
setMethod("psd2d",signature=signature(object='AFMImagePSDAnalysis'),
          function(object) {
            return(object@psd2d)
          })
setReplaceMethod(f="psd2d",
                 signature(AFMImagePSDAnalysis = "AFMImagePSDAnalysis", value = "data.table"),
                 definition= function(AFMImagePSDAnalysis, value) {
                   AFMImagePSDAnalysis@psd2d <- value
                   return(AFMImagePSDAnalysis)
                 })

#' Method \code{roughnessAgainstLengthscale} returns a data.table of roughnesses versus lengthscale
#' @name AFMImagePSDAnalysis-class
#' @rdname AFMImagePSDAnalysis-class
setGeneric("roughnessAgainstLengthscale",function(object){standardGeneric("roughnessAgainstLengthscale")})
setGeneric(name= "roughnessAgainstLengthscale<-",
           def= function(AFMImagePSDAnalysis, value) {
             return(standardGeneric("roughnessAgainstLengthscale<-"))
           })

#' @rdname AFMImagePSDAnalysis-class
#' @aliases roughnessAgainstLengthscale
setMethod("roughnessAgainstLengthscale",signature=signature(object='AFMImagePSDAnalysis'),
          function(object) {
            return(object@roughnessAgainstLengthscale)
          })
setReplaceMethod(f="roughnessAgainstLengthscale",
                 signature(AFMImagePSDAnalysis = "AFMImagePSDAnalysis", value = "data.table"),
                 definition= function(AFMImagePSDAnalysis, value) {
                   AFMImagePSDAnalysis@roughnessAgainstLengthscale <- value
                   return(AFMImagePSDAnalysis)
                 })

#' Method \code{intersections} returns an intersection numeric value
#' @name AFMImagePSDAnalysis-class
#' @rdname AFMImagePSDAnalysis-class
setGeneric("intersections",function(object){standardGeneric("intersections")})
setGeneric(name= "intersections<-",
           def= function(AFMImagePSDAnalysis, value) {
             return(standardGeneric("intersections<-"))
           })

#' @rdname AFMImagePSDAnalysis-class
#' @aliases intersections
setMethod("intersections",signature=signature(object='AFMImagePSDAnalysis'),
          function(object) {
            return(object@intersections)
          })
setReplaceMethod(f="intersections",
                 signature(AFMImagePSDAnalysis = "AFMImagePSDAnalysis", value = "numeric"),
                 definition= function(AFMImagePSDAnalysis, value) {
                   AFMImagePSDAnalysis@intersections <- value
                   return(AFMImagePSDAnalysis)
                 })

#' Shift the quadrants of the FFT 2D
#'
#' \code{shiftFFT2D} returns the FFT 2D matrix shifted to put zero frequencies in the middle.
#'
#' @param fft2data the FFT 2D of the AFM image
#' @return The shifted matrix
#' @author M.Beauvais
#' @export
#' @examples
#' \dontrun{
#' library(AFM)
#' library(fftwtools)
#'
#' data(AFMImageOfNormallyDistributedHeights)
#' AFMImage<-AFMImageOfNormallyDistributedHeights
#' nMheightsData= matrix(AFMImage@@data$h, nrow=AFMImage@@samplesperline)
#' shiftedFFT2D<-shiftFFT2D(fftwtools::fftw2d(nMheightsData))
#' }
shiftFFT2D<-function(fft2data) {
  N=nrow(fft2data)
  M=ncol(fft2data)
  halfN=N/2
  halfM=M/2
  quadrant1=fft2data[1:halfN, seq(1,halfM)]
  quadrant2=fft2data[seq(halfN+1,N), seq(1,halfM)]
  quadrant3=fft2data[seq(halfN+1,N),seq(halfM+1,M)]
  quadrant4=fft2data[seq(1,halfN),seq(halfM+1,M)]
  return(rbind(cbind(quadrant3,quadrant2),cbind(quadrant4, quadrant1)))
}

# zeroPadShiftedFFT2D<-function(shiftedFFT2Ddata){
#   N=nrow(fft2data)
#   M=ncol(fft2data)
#   # r = 2^(ceil(log2(x)))
# }

#' Calculate the shifted PSD matrix
#'
#' \code{shiftedPSDuv} returns the Power Spectral Density matrix in the frequency space from shifted FFT 2D
#'
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @return (1/NM^2) * abs(shiftedFFT2Ddata)^2 with N the number of lines of the sample and M the number of samples per line of the sample
#' @author M.Beauvais
#' @export
#' @examples
#' \dontrun{
#' library(AFM)
#' library(ggplot2)
#'
#' data(AFMImageOfRegularPeaks)
#' AFMImage<-AFMImageOfRegularPeaks
#' nMheightsData= matrix(AFMImage@@data$h, nrow=AFMImage@@samplesperline)
#' shiftedPSDuv<-shiftedPSDuv(AFMImage)
#' a=AFMImage@@scansize
#' b=AFMImage@@scansize
#'
#' M=AFMImage@@samplesperline
#' N=AFMImage@@lines
#' NM=N*M # pixels^2
#' MN = M*N
#' A=a*b
#' ab=a*b
#'
#' dx=a/M
#' dy=b/N
#'
#' um = seq( (1-(M+1)/2)/(M*dx), (M-(M+1)/2)/(M*dx), by=1/(M*dx))
#' vn = seq( (1-(N+1)/2)/(N*dy), (N-(N+1)/2)/(N*dy), by=1/(N*dy))
#' x = rep(um, times = AFMImage@@lines)
#' y = rep(vn, each = AFMImage@@samplesperline)
#' z = as.vector(shiftedPSDuv)
#'
#' data<-data.frame(x=x, y=y, z=z)
#'
#' p5 <- qplot(x, y, data=data, colour=log10(z))
#' p5 <- p5 + scale_colour_gradientn(colours = rainbow(7))
#' p5 <- p5 + ylab("v")
#' p5 <- p5 + xlab("u")
#' title<-paste("shifted PSD of", basename(AFMImage@@fullfilename))
#' p5 <- p5 + ggtitle(title)
#' # Hide all the horizontal gridlines
#' p5 <- p5 + theme(panel.grid.minor.x=element_blank(), panel.grid.major.x=element_blank())
#' # Hide all the vertical gridlines
#' p5 <- p5 + theme(panel.grid.minor.y=element_blank(), panel.grid.major.y=element_blank())
#' p5 <- p5 + theme(panel.background = element_rect(fill = 'white', colour = 'black'))
#' p5
#' }
shiftedPSDuv<-function(AFMImage) {
  nMheightsData= matrix(AFMImage@data$h, nrow=AFMImage@samplesperline)
  shiftedFFT2Ddata = shiftFFT2D(fftwtools::fftw2d(nMheightsData))
  N=nrow(shiftedFFT2Ddata)
  M=ncol(shiftedFFT2Ddata)
  NM=N*M
  return((1/NM^2) * abs(shiftedFFT2Ddata)^2)
}

#' Calculate the 2D Power Spectral Density
#'
#' PSD2DAgainstFrequency returns a data table of PSD 2D values against spatial frequencies
#'
#' @param AFMImage an \code{AFMImage} to be analysed
#' @param AFMImagePSDAnalysis an \code{AFMImagePSDAnalysis} to store PSD analysis results
#' @return \code{PSD2DAgainstFrequency} returns a data table of frequencies and PSD values
#' \itemize{
#'   \item freq: the considered frequency
#'   \item PSD: the considered PSD value
#'   \item type: PSD-2D
#'   \item fullfilename: directory and filename on the disk
#' }
#' @references Sidick2009, Erkin Sidick "Power Spectral Density Specification and Analysis of Large Optical Surfaces", 2009, "Modeling Aspects in Optical Metrology II, Proc. of SPIE Vol. 7390 73900L-1"
#' @name PSD2DAgainstFrequency
#' @rdname PSD2DAgainstFrequency-methods
#' @exportMethod PSD2DAgainstFrequency
#' @examples
#' \dontrun{
#' library(AFM)
#' library(ggplot2)
#' library(plyr)
#'
#' # Calculate Power Spectrum Density in 2D against frequency
#' data("AFMImageOfNormallyDistributedHeights")
#' oneAFMImage<-AFMImageOfNormallyDistributedHeights
#' psd2d<-PSD2DAgainstFrequency(oneAFMImage)
#' p <- ggplot(data=psd2d)
#' p <- p + geom_point(aes(freq, PSD, color=type),subset = .(type %in% c("PSD-2D")))
#' p <- p + geom_line(aes(freq, PSD, color=type),subset = .(type %in% c("PSD-1D")),size=1.1)
#' p <- p + scale_x_log10()
#' p <- p + scale_y_log10()
#' p <- p + ylab("PSD (nm^4)")
#' p <- p + xlab("Frequency (nm^-1)")
#' p <- p + ggtitle(basename(oneAFMImage@@fullfilename))
#' p
#' }
setGeneric(name= "PSD2DAgainstFrequency",
           def= function(AFMImage, AFMImagePSDAnalysis) {
             return(standardGeneric("PSD2DAgainstFrequency"))
           })

#' @rdname PSD2DAgainstFrequency-methods
#' @aliases PSD2DAgainstFrequency,AFMImage-method
setMethod(f="PSD2DAgainstFrequency", signature(AFMImage="AFMImage",AFMImagePSDAnalysis="AFMImagePSDAnalysis"),
          definition= function(AFMImage, AFMImagePSDAnalysis) {
            NyquistFq<-getNyquistSpatialFrequency(AFMImage)
            a=AFMImage@hscansize
            b=AFMImage@vscansize
            M=AFMImage@samplesperline
            N=AFMImage@lines
            NM=N*M # pixels^2
            MN = M*N
            A=a*b
            ab=a*b
            dx=a/M
            dy=b/N
            shiftedPSDuv<-shiftedPSDuv(AFMImage)
            um = seq( (1-(M+1)/2)/(M*dx), (M-(M+1)/2)/(M*dx), by=1/(M*dx))
            vn = seq( (1-(N+1)/2)/(N*dy), (N-(N+1)/2)/(N*dy), by=1/(N*dy))
            K=meshgrid(um,vn)
            K$Z<-sqrt(K$X^2+K$Y^2)
            aAggregatedPSDValuesForEachFreq=data.frame(freq=sort(unique(as.vector(K$Z))))
            totalLength <- length(aAggregatedPSDValuesForEachFreq$freq)
            frequencies<-c()
            sumedPSD<-c()
            counter<-0
            for(freq in aAggregatedPSDValuesForEachFreq$freq) {
              if (freq > NyquistFq) break;
              if (!is.null(AFMImagePSDAnalysis@updateProgress)&&
                  (is.function(AFMImagePSDAnalysis@updateProgress)&&
                   (!is.null(AFMImagePSDAnalysis@updateProgress())))) {
                counter<-counter+1
                if (counter/100==floor(counter/100)) {
                  value<-counter / totalLength
                  text <- paste0("freq:", round(freq, 2)," ", round(counter, 2),"/",totalLength)
                  AFMImagePSDAnalysis@updateProgress(value= value, detail = text)
                }
              }
              inds <- arrayInd(which(K$Z == freq), dim(K$Z))
              allPSDSum<-0
              allPSDSum<-sum(shiftedPSDuv[inds[,1:2]])
              sumedPSD = c(sumedPSD, allPSDSum)
              frequencies=c(frequencies, freq)
            }
            return(data.table(freq = frequencies,
                              PSD = sumedPSD,
                              type="PSD-2D",
                              name=AFMImage@fullfilename))
          })

#' Calculate the 1D Power Spectral Density; returns a data table of PSD 1D and PSD 2D values
#' against spatial frequencies.\cr As mentioned in Sidick2009, this function calculates the
#' PSD against spatial frequencies in 1D from \code{\link{PSD2DAgainstFrequency}} by using
#' breaks in the log space to sum PSD 2D and frequency values.
#'
#' @param AFMImage an \code{AFMImage} to be analysed
#' @param AFMImagePSDAnalysis an \code{AFMImagePSDAnalysis} to store the setup and results of PSD analysis
#' @return \code{PSD1DAgainstFrequency} returns a data table of frequencies and PSD values
#' \itemize{
#'   \item freq: the considered frequency
#'   \item PSD: the considered PSD value
#'   \item type: PSD-1D
#'   \item fullfilename: directory and filename on the disk
#' }
#' @name PSD1DAgainstFrequency
#' @rdname PSD1DAgainstFrequency-methods
#' @exportMethod PSD1DAgainstFrequency
#' @examples
#' \dontrun{
#' library(AFM)
#' library(ggplot2)
#' library(plyr)
#' library(scales)
#' data("AFMImageOfNormallyDistributedHeights")
#' newAFMImage<-AFMImageOfNormallyDistributedHeights
#' newAFMImage@@fullfilename<-"C:/Users/one/AFMImageOfNormallyDistributedHeights.txt"
#' psdAnalysis<-AFMImagePSDAnalysis()
#' # Create a closure to update progress
#' psdAnalysis@@updateProgress<- function(value = NULL, detail = NULL, message = NULL) {
#'   if (exists("progressPSD")){
#'     if (!is.null(message)) {
#'       progressPSD$set(message = message, value = 0)
#'     }else{
#'       progressPSD$set(value = value, detail = detail)
#'     }
#'   }
#' }
#' psdAnalysis@@psd1d_breaks<-2^3
#' psdAnalysis@@psd2d_truncHighLengthScale<-TRUE
#' psdAnalysis<-performAllPSDCalculation(AFMImagePSDAnalysis= psdAnalysis, AFMImage= newAFMImage)
#' datap<-psdAnalysis@@psd1d
#' p <- ggplot(data=datap)
#' p <- p + geom_point(aes(freq, PSD, color=type),data=datap[datap$type %in% c("PSD-2D")])
#' p <- p + geom_line(aes(freq, PSD, color=type),data=datap[datap$type %in% c("PSD-1D")],size=1.1)
#' p <- p + scale_x_log10()
#' p <- p + scale_y_log10()
#' p <- p + ylab("PSD (nm^4)")
#' p <- p + xlab("Frequency (nm^-1)")
#' p
#' }
setGeneric(name= "PSD1DAgainstFrequency",
           def= function(AFMImage,AFMImagePSDAnalysis) {
             return(standardGeneric("PSD1DAgainstFrequency"))
           })

#' @rdname PSD1DAgainstFrequency-methods
#' @aliases PSD1DAgainstFrequency,AFMImage-method
setMethod(f="PSD1DAgainstFrequency", "AFMImage",
          definition= function(AFMImage, AFMImagePSDAnalysis) {
            AFMImagePSDAnalysis@psd2d<-PSD2DAgainstFrequency(AFMImage, AFMImagePSDAnalysis)
            breaks=AFMImagePSDAnalysis@psd1d_breaks
            psd2dDT=AFMImagePSDAnalysis@psd2d
            # step 3, cut in the log space
            Q <- breaks
            maxRhoL<- max(psd2dDT$freq)
            maxRhoL
            psd2dDT$logcuts<-cut(log10(psd2dDT$freq),breaks = Q)
            meanFreq<-c()
            meanPSD<-c()
            totalLength<-length(unique(as.vector(psd2dDT$logcuts)))
            counter<-0
            if (!is.null(AFMImagePSDAnalysis@updateProgress)&&
                (is.function(AFMImagePSDAnalysis@updateProgress)&&
                 (!is.null(AFMImagePSDAnalysis@updateProgress())))) {
              text <- paste0("starting ", totalLength, " calculations")
              AFMImagePSDAnalysis@updateProgress(value= 0, detail = text)
            }
            for(freq in sort(unique(as.vector(psd2dDT$logcuts)))) {
              inds <- arrayInd(which(psd2dDT$logcuts == freq), dim(psd2dDT))
              allFreqSum<-0
              allPSDSum<-0
              allFreqSum<-mean(psd2dDT$freq[inds[,1]])
              allPSDSum<-mean(psd2dDT$PSD[inds[,1]])
              meanFreq = c(meanFreq, allFreqSum)
              meanPSD = c(meanPSD, allPSDSum)
              if (!is.null(AFMImagePSDAnalysis@updateProgress)&&
                  (is.function(AFMImagePSDAnalysis@updateProgress)&&
                   (!is.null(AFMImagePSDAnalysis@updateProgress())))) {
                counter<-counter+1
                if (counter/100==floor(counter/100)) {
                  value<- counter / totalLength
                  text <- paste0("freq:", round(freq, 2)," ", round(counter, 2),"/",totalLength)
                  AFMImagePSDAnalysis@updateProgress(value= value, detail = text)
                }
              }
            }
            return(rbind(data.table(freq = meanFreq, PSD = meanPSD, type="PSD-1D", name=AFMImage@fullfilename),
                         data.table(freq = psd2dDT$freq, PSD = psd2dDT$PSD, type="PSD-2D", name=psd2dDT$name)))
          })
#' Calculate the roughness of the sample against length scale
#'
#' The calculation of the roughness against lengthscale is performed through a FFT 2D calculation, PSD 2D calculation and a meshgrid of frequencies.
#' \code{RoughnessByLengthScale} returns a data.table of roughnesses against length scales
#'
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @param AFMImagePSDAnalysis an \code{AFMImagePSDAnalysis} to store the setup and results of PSD analysis
#'
#' @return a data table of length scale (r) and roughness values (roughness)
#' \itemize{
#'   \item {roughness: roughnesses}
#'   \item {r: length scales}
#'   \item {filename: fullfilename slot of the AFMImage}
#' }
#' @name RoughnessByLengthScale
#' @rdname RoughnessByLengthScale-methods
#' @exportMethod RoughnessByLengthScale
#' @author M.Beauvais
#' @examples
#' \dontrun{
#' library(AFM)
#' library(ggplot2)
#'
#' data("AFMImageOfNormallyDistributedHeights")
#' oneAFMImage<-AFMImageOfNormallyDistributedHeights
#' AFMImagePSDAnalysis<-AFMImagePSDAnalysis()
#' data<-RoughnessByLengthScale(oneAFMImage, AFMImagePSDAnalysis)
#' r<-roughness<-filename<-NULL
#' p1 <- ggplot(data, aes(x=r, y=roughness, colour= basename(filename)))
#' p1 <- p1 + geom_point()
#' p1 <- p1 + geom_line()
#' p1 <- p1 + ylab("roughness (nm)")
#' p1 <- p1 + xlab("lengthscale (nm)")
#' p1
#' }
setGeneric(name= "RoughnessByLengthScale",
           def= function(AFMImage, AFMImagePSDAnalysis) {
             return(standardGeneric("RoughnessByLengthScale"))
           })

#' @rdname RoughnessByLengthScale-methods
#' @aliases RoughnessByLengthScale,AFMImage-method
setMethod(f="RoughnessByLengthScale", "AFMImage",
          definition= function(AFMImage, AFMImagePSDAnalysis) {
            # calculate roughness depending on frequency
            AFMImagePSDAnalysis@psd2d<-PSD2DAgainstFrequency(AFMImage, AFMImagePSDAnalysis)
            truncHighLengthScale = AFMImagePSDAnalysis@psd2d_truncHighLengthScale
            maxHighLengthScale = AFMImagePSDAnalysis@psd2d_maxHighLengthScale
            AggregatedPSDValuesForEachFreq = AFMImagePSDAnalysis@psd2d
            minFrequency<-1/min(AFMImage@hscansize, AFMImage@vscansize)
            indexfmin<-tail(which(AggregatedPSDValuesForEachFreq$freq < minFrequency), n=1)
            if (missing(truncHighLengthScale)||truncHighLengthScale==FALSE) {
              if(!missing(maxHighLengthScale)){
                truncHighLengthScale <- FALSE
                if (maxHighLengthScale<(1/minFrequency)) {
                  indexfmin<-which(AggregatedPSDValuesForEachFreq$freq > (1/maxHighLengthScale))[1]-1
                }
              }
            }
            nyquistSF <- getNyquistSpatialFrequency(AFMImage)
            indexfmax<-which(AggregatedPSDValuesForEachFreq$freq > nyquistSF)[1]-1
            #if (!isTRUE(truncHighLengthScale)||is.na(indexfmin)) indexfmin<-0
            if (is.na(indexfmin)) indexfmin<-0
            if (is.na(indexfmax)) indexfmax<-length(AggregatedPSDValuesForEachFreq$freq)
            r<-c()
            roughnesses=c()
            totalLength<-indexfmax
            counter<-0
            for (i in seq(1,indexfmax)){
              if (i>indexfmin) {
                tryingPSDSum<-sum(AggregatedPSDValuesForEachFreq$PSD[i:indexfmax])
                roughnesses=c(roughnesses, sqrt(tryingPSDSum))
                r=c(r, 1/AggregatedPSDValuesForEachFreq$freq[i])
                if (!is.null(AFMImagePSDAnalysis@updateProgress)&&
                    is.function(AFMImagePSDAnalysis@updateProgress)&&
                    !is.null(AFMImagePSDAnalysis@updateProgress())) {
                  counter<-counter+1
                  if (counter/100==floor(counter/100)) {
                    value<-counter / totalLength
                    text <- paste0(round(counter, 2),"/",totalLength)
                    AFMImagePSDAnalysis@updateProgress(value= value, detail = text)
                  }
                }
              }
            }
            return(data.table(filename=rep(AFMImage@fullfilename, length(AggregatedPSDValuesForEachFreq$freq)-indexfmin),
                              r= r, roughness= roughnesses))
          })
#' Get the Nyquist spatial frequency
#'
#' Get the Nyquist spatial frequency of an \code{\link{AFMImage}} calculated as follows:\cr
#' 0.5 multiplied by the minimum of the number of samples per line divided by the horizontal scansize and the number of lines divided by the vertical scansize
#'
#' \code{getNyquistSpatialFrequency} returns the Nyquist spatial frequency as a numeric
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @return the Nyquist spatial frequency of the \code{\link{AFMImage}}
#' @name getNyquistSpatialFrequency
#' @rdname getNyquistSpatialFrequency-methods
#' @exportMethod getNyquistSpatialFrequency
#' @author M.Beauvais
#' @examples
#' \dontrun{
#' library(AFM)
#'
#' data(AFMImageOfNormallyDistributedHeights)
#' NyquistSpatialFrequency<-getNyquistSpatialFrequency(AFMImageOfNormallyDistributedHeights)
#' print(NyquistSpatialFrequency)
#' }
#'
setGeneric(name= "getNyquistSpatialFrequency",
           def= function(AFMImage) {
             return(standardGeneric("getNyquistSpatialFrequency"))
           })

#' @rdname getNyquistSpatialFrequency-methods
#' @aliases getNyquistSpatialFrequency,AFMImage-method
setMethod(f="getNyquistSpatialFrequency", "AFMImage",
          definition= function(AFMImage) {
            M=AFMImage@samplesperline
            N=AFMImage@lines
            a=AFMImage@hscansize
            b=AFMImage@vscansize
            dx=a/M
            dy=b/N
            #old return(min(abs((1-(M+1)/2)/(M*dx)), abs((1-(N+1)/2)/(N*dy))))
            return(min(1/(2*dx),1/(2*dy)))
          })

#' Get a zero padded AFMImage
#'
#' Get a zero padded \code{\link{AFMImage}} useful in Power Spectral Density analysis. The original \code{\link{AFMImage}} is padded with zeros in order to get a larger square AFMImage whose size is a power of 2.
#'
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @return a zero-padded \code{\link{AFMImage}} with a fullfilename equal to the original fullfilename pasted with padded-to-"ScanSize".txt
#' @author M.Beauvais
#' @export
#' @examples
#' \dontrun{
#' library(AFM)
#'
#' data(AFMImageOfNormallyDistributedHeights)
#' paddedAFMImage<-getPaddedAFMImage(AFMImageOfNormallyDistributedHeights)
#' displayIn3D(AFMImage= paddedAFMImage, width= 1024,noLight=TRUE)
#' }
getPaddedAFMImage<-function(AFMImage) {
  paddedAFMImageMatrix<-matrix(AFMImage@data$h, nrow=AFMImage@samplesperline, ncol=AFMImage@lines,byrow = TRUE)
  N=nrow(paddedAFMImageMatrix)
  print(N)
  M=ncol(paddedAFMImageMatrix)
  print(M)
  rn = 2^(ceil(log2(N)))
  paddedN <- ifelse(rn==N, 2^(ceil(log2(N+1))), rn)
  rm = 2^(ceil(log2(M)))
  paddedM <- ifelse(rm==M, 2^(ceil(log2(M+1))), rm)
  addingN=paddedN/4
  addingM=paddedM/4
  A<-matrix( rep(0,addingM*N), nrow=N,ncol=addingM,byrow = TRUE)
  B<-matrix( rep(0,addingM*paddedN), nrow=addingM,ncol=paddedN,byrow = TRUE)
  paddedAFMImageMatrix<-cbind(A, paddedAFMImageMatrix, A)
  paddedAFMImageMatrix<-rbind(B, paddedAFMImageMatrix, B)
  Lines<-paddedN;
  Samplesperline<-paddedM;
  ScanSize<-AFMImage@hscansize*paddedM/M
  # not tested
  hscanSize<-AFMImage@hscansize*paddedM/M
  vscanSize<-AFMImage@vscansize*paddedN/N
  ScanSize<-max(hscanSize, vscanSize)
  scanSizeFromZero<-ScanSize-1
  scanby<-ScanSize/Samplesperline
  endScan<-ScanSize*(1-1/Samplesperline)
  nM<-as.vector(t(paddedAFMImageMatrix))
  AFMImage(data = data.table(x = rep(seq(0,endScan, by= scanby), times = Lines),
                             y = rep(seq(0,endScan, by= scanby), each = Samplesperline),
                             h = nM),
           samplesperline = Samplesperline,
           lines = Lines,
           hscansize = hscanSize,
           vscansize = vscanSize,
           scansize = ScanSize,
           fullfilename = paste(AFMImage@fullfilename, "padded-to-",ScanSize,".txt",sep=""))
}

#' Perform all the calculations for PSD exploitation
#'
#' \code{\link{performAllPSDCalculation}} performs all the calculations for PSD exploitation
#' @param AFMImagePSDAnalysis an \code{\link{AFMImagePSDAnalysis}} to manage and store the results of PSD analysis
#' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy
#' @author M.Beauvais
#' @export
#' @examples
#' \dontrun{
#' library(AFM)
#'
#' data(AFMImageOfNormallyDistributedHeights)
#'
#' newAFMImage<-AFMImageOfNormallyDistributedHeights
#' newAFMImage@@fullfilename<-"C:/Users/one/AFMImageOfNormallyDistributedHeights.txt"
#' psdAnalysis<-AFMImagePSDAnalysis()
#' # Create a closure to update progress
#' psdAnalysis@@updateProgress<- function(value = NULL, detail = NULL, message = NULL) {
#'   if (exists("progressPSD")){
#'     if (!is.null(message)) {
#'       progressPSD$set(message = message, value = 0)
#'     }else{
#'       progressPSD$set(value = value, detail = detail)
#'     }
#'   }
#' }
#' psdAnalysis@@psd1d_breaks<-2^3
#' psdAnalysis@@psd2d_truncHighLengthScale<-TRUE
#' psdAnalysis<-performAllPSDCalculation(AFMImagePSDAnalysis= psdAnalysis, AFMImage= newAFMImage)
#' print("done psdAnalysis")
#' }
performAllPSDCalculation<-function(AFMImagePSDAnalysis, AFMImage) {
  if (is.function(AFMImagePSDAnalysis@updateProgress)) {
    AFMImagePSDAnalysis@updateProgress(message="1/3 - Calculating PSD2D", value=0)
  }
  AFMImagePSDAnalysis@psd2d<-PSD2DAgainstFrequency(AFMImage, AFMImagePSDAnalysis)
  if (is.function(AFMImagePSDAnalysis@updateProgress)) {
    AFMImagePSDAnalysis@updateProgress(message="2/3 Calculating PSD1D", value=0)
  }
  AFMImagePSDAnalysis@psd1d<-PSD1DAgainstFrequency(AFMImage, AFMImagePSDAnalysis)
  if (is.function(AFMImagePSDAnalysis@updateProgress)) {
    AFMImagePSDAnalysis@updateProgress(message="3/3 Calculating Roughness", value=0)
  }
  AFMImagePSDAnalysis@roughnessAgainstLengthscale<-RoughnessByLengthScale(AFMImage, AFMImagePSDAnalysis)
  return(AFMImagePSDAnalysis)
}

#' save an image of the roughness against lengthscale calculations
#'
#' \code{\link{saveOnDiskIntersectionForRoughnessAgainstLengthscale}} saves an image of the roughness against lengthscale calculations
#' @param AFMImageAnalyser an \code{\link{AFMImageAnalyser}} to get the roughness against lengthscale calculation
#' @param exportDirectory a directory on the file system
#' @author M.Beauvais
#' @export
saveOnDiskIntersectionForRoughnessAgainstLengthscale<-function(AFMImageAnalyser, exportDirectory){
  sampleName<-basename(AFMImageAnalyser@fullfilename)
  data<-getSimplifiedRoughnessAgainstLenghscale(AFMImageAnalyser)
  data$r<-as.numeric(data$r)
  aval<-max(data$r)
  index<-which(data$r<= aval)[1]
  print(index)
  lengthData<-length(data$r)-index
  ndataw<-tail(data,n= lengthData)
  ndataw$sample<-basename(ndataw$filename)

  # find x1 x2 that minimize Xinter
  min=nrow(data)
  max=2
  point<- data[data$r %in% min(data$r)]
  otherpoints<-as.numeric(point$V1)-min
  point1<-data[as.numeric(data$V1) %in% (otherpoints)]
  otherpoints<-as.numeric(point$V1)-max
  point2<-data[as.numeric(data$V1) %in% (otherpoints)]
  point1<-data[min]
  point2<-data[max]
  origintangeantePoints = data.table(x=c(point1$r, point2$r),
                                     y=c(point1$roughness, point2$roughness))
  aorigin<-0
  borigin <- point1$roughness - aorigin * point2$r
  x1x2=AFMImageAnalyser@psdAnalysis@intersections[c(2,3,5,6)]
  print(x1x2)
  for(i in c(0,2)) {
    x1<-x1x2[i+1]
    x2<-x1x2[i+2]
    print(x1)
    point1<-data[x1]
    point2<-data[x2]
    x=data[seq(x2,x1)]$r
    y=data[seq(x2,x1)]$roughness
    res <- lm(y~x)
    coefficients(res)
    b<-unname(res$coefficients[1])
    a<-unname(res$coefficients[2])
    tangeantePoints = data.table(x=c(point1$r, point2$r),
                                 y=c(point1$roughness, point2$roughness))
    xinter <- (b-borigin)/(aorigin-a)
    title<-paste(" -Lc= ", xinter," -plateau= ", borigin)
    roughness<-r<-NULL
    p1 <- ggplot(ndataw, aes(x=r, y=roughness, colour= basename(sample)))
    p1 <- p1 + geom_point()
    p1 <- p1 + geom_line()
    p1 <- p1 + geom_abline(intercept = b, slope = a)
    p1 <- p1 + geom_point(data=tangeantePoints, aes(x=x, y=y), color="blue")
    p1 <- p1 + geom_abline(intercept = borigin, slope = aorigin)
    p1 <- p1 + geom_point(data=origintangeantePoints, aes(x=x, y=y), color="blue")
    p1 <- p1 + ylab("roughness (nm)")
    p1 <- p1 + xlab("lengthscale (nm)")
    p1 <- p1 + guides(colour=FALSE)
    p1 <- p1 + ggtitle(title)
    exportpng2FullFilename=getRoughnessAgainstLengthscaleIntersection(exportDirectory, paste( sampleName, i, sep="-"))
    print(paste("saving", basename(exportpng2FullFilename)))
    png(filename=exportpng2FullFilename, units = "px", width=800, height=800)
    print(p1)
    dev.off()
  }
}

#' get the intersection between the tangent and the plateau
#'
#' \code{\link{getAutoIntersectionForRoughnessAgainstLengthscale}} gets the intersection between the tangent and the plateau
#' @param AFMImageAnalyser an \code{\link{AFMImageAnalyser}} to get the roughness against lengthscale calculation
#' @param second_slope a boolean to manage the first or second slope in the roughness against lengthscale curve
#' @return a \code{\link{AFMImagePSDSlopesAnalysis}}
#' @author M.Beauvais
#' @export
getAutoIntersectionForRoughnessAgainstLengthscale<-function(AFMImageAnalyser, second_slope=FALSE){
  # sampleName<-basename(AFMImageAnalyser@AFMImage@fullfilename)
  # exportDirectory<-paste(dirname(AFMImageAnalyser@AFMImage@fullfilename), "outputs", sep="/")
  data<-getSimplifiedRoughnessAgainstLenghscale(AFMImageAnalyser)
  data$r<-as.numeric(data$r)
  aval<-max(data$r)
  index<-which(data$r<= aval)[1]
  lengthData<-length(data$r)-index
  print(paste("lengthData=",lengthData))
  #lengthData
  # ndataw<-tail(data,n= lengthData)
  # ndataw$sample<-basename(ndataw$filename)
  # print(paste("length(ndataw)=",length(ndataw)))
  lengthData<-nrow(data)
  #newMax=ceiling(data[c(lengthData),]$r/20)
  minW<-which.min(abs(data$r - data[c(lengthData),]$r/20))
  newMax=minW
  print(paste("first slope - newMax= ",newMax))
  #newMax_second_slope=ceiling(data[c(lengthData),]$r/10)
  minW<-which.min(abs(data$r - data[c(lengthData),]$r/10))
  minW
  data[minW]
  newMax_second_slope=minW
  print(paste("second slope newMax_second_slope= ",newMax_second_slope))

  minimumR <- function(data, space, x, y, second_slope) {
    lengthData<-nrow(data)
    aorigin<-0
    borigin <- data[lengthData]$roughness
    #print(borigin)
    finalres2=c()
    finalres = c(Inf,0,0,0,0,0)
    for (i in seq(1, length(x))) {
      x1=x[i]
      for (j in seq(1, length(y))) {
        if (abs(j-i)>space) {
          x2=y[j]
          if ((x1<1)||(x2<1)||(x1>lengthData)||(x2>lengthData)||(x1==x2)) {
            inter <- data[1]$r
          } else{
            if (x1<x2) {
              myx=data[seq(x1,x2)]$r
              myy=data[seq(x1,x2)]$roughness
            }
            if (x1>x2) {
              myx=data[seq(x2,x1)]$r
              myy=data[seq(x2,x1)]$roughness
            }
            slope=(myy[2]-myy[1])/(myx[2]-myx[1])
            yintersept = myy[1] - slope * myx[1]
            res <- lm(myy~myx)
            b<-unname(res$coefficients[1])
            a<-unname(res$coefficients[2])
            inter <- (borigin-b)/a
            slope <- a
            yintersept <- b
            if ((inter<finalres[1])&(inter>0)) {
              finalres=c(inter, x1, x2, borigin, slope, yintersept)
              print(finalres)
              # print(paste(a,b))
              # print(paste(myx[1],myx[2],myy[1],myy[2]))
            }
          }
        }
        #print(paste(x1, x2))
      }
      #finalres2=c(finalres2, inter)
    }
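    # pack the best candidate found, c(Lc, x1, x2, wsat, slope, y-intercept),
    # into an AFMImagePSDSlopesAnalysis result object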
    AFMImagePSDSlopesAnalysis = new("AFMImagePSDSlopesAnalysis")
    AFMImagePSDSlopesAnalysis@lc=finalres[1]
    AFMImagePSDSlopesAnalysis@tangente_point1=finalres[2]
    AFMImagePSDSlopesAnalysis@tangente_point2=finalres[3]
    AFMImagePSDSlopesAnalysis@wsat=finalres[4]
    AFMImagePSDSlopesAnalysis@slope=finalres[5]
    AFMImagePSDSlopesAnalysis@yintersept=finalres[6]
    #print(AFMImagePSDSlopesAnalysis)
    return(AFMImagePSDSlopesAnalysis)
  }

  if (second_slope==FALSE) {
    aby<-1
    print(aby)
    x <- seq(1, newMax,by=aby)
    print(x)
    z <- minimumR(data, space= 1, x,x,second_slope)
  } else {
    aby<-1
    print(aby)
    space=30 # overrides the earlier candidates ceiling(lengthData/4), /8 and /10
    print(paste("space= ",space))
    x <- seq(newMax_second_slope,lengthData, by=aby)
    z <- minimumR(data, space= space, x,x,second_slope)
  }
  return(z)
}

#' get the intersection between the tangent and the plateau
#'
#' \code{\link{getIntersectionForRoughnessAgainstLengthscale}} gets the intersection between the tangent and the plateau
#' @param AFMImageAnalyser an \code{\link{AFMImageAnalyser}} to get the roughness against lengthscale calculation
#' @param minValue index of the lowest point to be used for the tangent
#' @param maxValue index of the highest point to be used for the tangent
#' @param second_slope a boolean to manage the first or second slope in the roughness against lengthscale curve
#' @return a \code{\link{AFMImagePSDSlopesAnalysis}}
#' @author M.Beauvais
#' @export
getIntersectionForRoughnessAgainstLengthscale<-function(AFMImageAnalyser, minValue, maxValue, second_slope=FALSE){
  # sampleName<-basename(AFMImageAnalyser@AFMImage@fullfilename)
  # exportDirectory<-paste(dirname(AFMImageAnalyser@AFMImage@fullfilename), "outputs", sep="/")
  data<-getSimplifiedRoughnessAgainstLenghscale(AFMImageAnalyser)
  data$r<-as.numeric(data$r)
  aval<-max(data$r)
  index<-which(data$r<= aval)[1]
  lengthData<-length(data$r)-index
  print(paste("lengthData=",lengthData))
  lengthData<-nrow(data)

  minimumR <- function(data, x1, x2) {
    lengthData<-nrow(data)
    aorigin<-0
    borigin <- data[lengthData]$roughness
    #print(borigin)
    finalres2=c()
    if ((x1<1)||(x2<1)||(x1>lengthData)||(x2>lengthData)||(x1==x2)) {
      inter <- data[1]$r
    } else{
      if (x1<x2) {
        myx=data[seq(x1,x2)]$r
        myy=data[seq(x1,x2)]$roughness
      }
      if (x1>x2) {
        myx=data[seq(x2,x1)]$r
        myy=data[seq(x2,x1)]$roughness
      }
      slope=(myy[2]-myy[1])/(myx[2]-myx[1])
      yintersept = myy[1] - slope * myx[1]
      res <- lm(myy~myx)
      b<-unname(res$coefficients[1])
      a<-unname(res$coefficients[2])
      inter <- (borigin-b)/a
      slope <- a
      yintersept <- b
      finalres=c(inter, x1, x2, borigin, slope, yintersept)
      print(finalres)
      # print(paste(a,b))
      # print(paste(myx[1],myx[2],myy[1],myy[2]))
    }
    AFMImagePSDSlopesAnalysis = new("AFMImagePSDSlopesAnalysis")
    AFMImagePSDSlopesAnalysis@lc=finalres[1]
    AFMImagePSDSlopesAnalysis@tangente_point1=finalres[2]
    AFMImagePSDSlopesAnalysis@tangente_point2=finalres[3]
    AFMImagePSDSlopesAnalysis@wsat=finalres[4]
    AFMImagePSDSlopesAnalysis@slope=finalres[5]
    AFMImagePSDSlopesAnalysis@yintersept=finalres[6]
    #print(AFMImagePSDSlopesAnalysis)
    return(AFMImagePSDSlopesAnalysis)
  }

  AFMImagePSDSlopesAnalysis <- minimumR(data, minValue,maxValue)
  return(AFMImagePSDSlopesAnalysis)
}

getRoughnessAgainstLengthscale<-function(exportDirectory, sampleName) {
  exportCsvFilename<-paste(sampleName,"-roughness-against-lengthscale.png", sep="")
  exportCsvFullFilename<-paste(exportDirectory, exportCsvFilename, sep="/")
  return(exportCsvFullFilename)
}

getRoughnessAgainstLengthscale10nm<-function(exportDirectory, sampleName) {
  exportCsvFilename<-paste(sampleName,"-roughness-against-lengthscale-10nm.png", sep="")
  exportCsvFullFilename<-paste(exportDirectory, exportCsvFilename, sep="/")
  return(exportCsvFullFilename)
}

getRoughnessAgainstLengthscaleIntersection<-function(exportDirectory, sampleName) {
  exportpngFilename<-paste(sampleName,"-roughness-against-lengthscale-intersection.png", sep="")
  exportpngFullFilename<-paste(exportDirectory, exportpngFilename, sep="/")
  return(exportpngFullFilename)
}

getSimplifiedRoughnessAgainstLenghscale<-function(AFMImageAnalyser) {
  ral<-copy(AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale)
  ral<-ral[order(r)]
  ral$simplyfied_r<-round(ral$r)
  ral<-ral[!duplicated(ral$simplyfied_r),]
  roughness<-r<-NULL
  resPSD = data.table(
    filename=ral$filename,
    r = ral$r,
    roughness=ral$roughness
  )
  return(resPSD)
}
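# Usage sketch (not part of the package API): a minimal, hedged end-to-end PSD
# workflow on the bundled sample image -- compute PSD 2D/1D and the roughness
# against lengthscale, then estimate the lengthscale Lc at the tangent/plateau
# intersection. The no-op updateProgress closure and the hand-built
# AFMImageAnalyser are assumptions of this sketch, not package requirements.
# Wrapped in `if (FALSE)` so it never runs when the file is sourced.
if (FALSE) {
  library(AFM)
  data(AFMImageOfNormallyDistributedHeights)
  img <- AFMImageOfNormallyDistributedHeights
  psdAnalysis <- AFMImagePSDAnalysis()
  # silence progress reporting (a GUI would plug a real callback in here)
  psdAnalysis@updateProgress <- function(value = NULL, detail = NULL, message = NULL) NULL
  psd1d_breaks(psdAnalysis) <- 2^3            # number of log-space breaks for PSD 1D
  psd2d_truncHighLengthScale(psdAnalysis) <- TRUE
  psdAnalysis <- performAllPSDCalculation(AFMImagePSDAnalysis = psdAnalysis,
                                          AFMImage = img)
  head(psdAnalysis@roughnessAgainstLengthscale)
  # the intersection helpers read the PSD analysis from an AFMImageAnalyser
  analyser <- new("AFMImageAnalyser", AFMImage = img, fullfilename = img@fullfilename)
  analyser@psdAnalysis <- psdAnalysis
  slopes <- getAutoIntersectionForRoughnessAgainstLengthscale(analyser,
                                                              second_slope = FALSE)
  print(slopes@lc) # lengthscale at the tangent/plateau intersection
}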
require("fftwtools") require("pracma") require("data.table") require("gstat") require(sp) require("stringr") require(gridExtra) require(ggplot2) #require(reshape2) # for reporting require(png) require(grid) if(getRversion() >= "3.1.0") utils::suppressForeignCheck(c("r", "roughness", "dir.hor", "id","name","press","ang1","ang2","ang3","anis1","anis2","id")) #' Generate a pdf report for all AFM images in a directory #' #' A function to generate a pdf report for each \code{\link{AFMImage}} in a directory. Images should be in export Nanoscope format as the \code{\link{importFromNanoscope}} function will be used. #' #' @param imageDirectory a directory where are located image as Nanoscope export format #' @param imageNumber (optional) an image number in the directory. If it is set only the selected image will be processed. #' @author M.Beauvais #' @export #' @examples #' \dontrun{ #' library(AFM) #' # A report will be generated for all the images in imageDirectory directory #' # imageDirectory="c:/images" #' imageDirectory=tempdir() #' exit<-generateReportFromNanoscopeImageDirectory(imageDirectory) #' #' # A report will be generated for the fifth image in the imageDirectory directory #' exit<-generateReportFromNanoscopeImageDirectory(imageDirectory,5) #' } generateReportFromNanoscopeImageDirectory<-function(imageDirectory, imageNumber) { filesToProcess<-list.files(imageDirectory, include.dirs = FALSE, recursive = FALSE,full.names = TRUE,pattern = "\\.txt$") if (!missing(imageNumber)) { if (imageNumber<=length(filesToProcess)){ filesToProcess<-filesToProcess[imageNumber] }else{ print(paste("Selected image number",imageNumber,paste("exceeds the number of image in directory (",length(filesToProcess),")", sep=""))) return(FALSE) } } for(fullfilename in filesToProcess){ AFMImage<-importFromNanoscope(fullfilename) generateReport(AFMImage) } return(TRUE) } #' Generate an analysis report for one AFMImage #' #' A function to analyse an \code{\link{AFMImage}} and save on disk the analysis. The analysis are saved in outputs directory located in the image directory. #' All the rdata and image files in the reportDirectory directory are loaded to generate one report for one \code{\link{AFMImage}}. #' #' @param AFMImage an \code{\link{AFMImage}} to be analysed #' @author M.Beauvais #' @export #' @examples #' \dontrun{ #' library(AFM) #' #' # Analyse the AFMImageOfRegularPeaks AFMImage sample from this package #' data("AFMImageOfRegularPeaks") #' AFMImage<-AFMImageOfRegularPeaks #' #' # exportDirectory="C:/Users/my_windows_login" or exportDirectory="/home/ubuntu" #' exportDirectory=tempdir() #' AFMImage@@fullfilename<-paste(exportDirectory,"AFMImageOfRegularPeaks.txt",sep="/") #' #' # Start to check if your sample is normaly distributed and isotropic. #' generateCheckReport(AFMImage) #' # If the sample is normaly distributed and isotropic, generate a full report #' generateReport(AFMImage) #' #' #' # Analyse your own AFM image from nanoscope analysis (TM) software tool #' anotherAFMImage<-importFromNanoscope("c:/users/my_windows_login/myimage.txt") #' # Start to check if your sample is normaly distributed and isotropic. 
#' generateCheckReport(anotherAFMImage)
#' # If your sample is normally distributed and isotropic, generate a full report
#' generateReport(anotherAFMImage)
#' }
generateReport <- function(AFMImage) {
  sampleName<-basename(AFMImage@fullfilename)
  sampleDirectory<-dirname(AFMImage@fullfilename)
  print(paste("generating a full report for", sampleName, "in", sampleDirectory))
  reportDirectory<-paste(sampleDirectory, "outputs", sep="/")
  createReportDirectory(reportDirectory)

  AFMImageAnalyser<-new("AFMImageAnalyser", AFMImage=AFMImage, fullfilename= AFMImage@fullfilename)
  AFMImageAnalyser<-analyse(AFMImageAnalyser=AFMImageAnalyser)
  putAnalysisOnDisk(AFMImageAnalyser=AFMImageAnalyser, AFMImage=AFMImage)

  # # find rdata file for the AFMImage
  # rdata_directoryfiles<-list.files(reportDirectory,
  #                                  include.dirs = FALSE, recursive = FALSE, full.names = TRUE,
  #                                  pattern = paste(sampleName,"AFMImageAnalyser.rda$",sep="-"))
  # if (length(rdata_directoryfiles)>0) {
  reportFullfilename<-paste(reportDirectory, paste(sampleName,"fullreport.pdf",sep="-"),sep="/")
  generateAFMImageReport(AFMImageAnalyser, reportFullfilename, isCheckReport = FALSE)
  # }else{
  #   print("analysis not found...")
  #   print(paste(sampleName,"AFMImageAnalyser.rda",sep="-"))
  # }
  print("done")
}

#' Generate a check report for one AFMImage
#'
#' Generate a check report in pdf format in order to analyse the distribution and the isotropy of the heights of the \code{\link{AFMImage}}.
#'
#' @param AFMImage an \code{\link{AFMImage}} imported from Nanoscope Analysis(TM) with \code{\link{importFromNanoscope}} or created manually (see \code{\link{AFMImage}})
#' @author M.Beauvais
#' @export
#' @examples
#' \dontrun{
#' library(AFM)
#'
#' # Analyse the AFMImageOfRegularPeaks AFMImage sample from this package
#' data("AFMImageOfRegularPeaks")
#' AFMImage<-AFMImageOfRegularPeaks
#' # exportDirectory="C:/Users/my_windows_login" or exportDirectory="/home/ubuntu"
#' exportDirectory=tempdir()
#' AFMImage@@fullfilename<-paste(exportDirectory,"AFMImageOfRegularPeaks.txt",sep="/")
#'
#' # Start by checking if your sample is normally distributed and isotropic.
#' generateCheckReport(AFMImage)
#' # If the sample is normally distributed and isotropic, generate a full report
#' generateReport(AFMImage)
#'
#' # Analyse your own AFM image from the nanoscope analysis (TM) software tool
#' anotherAFMImage<-importFromNanoscope("c:/users/me/myimage.txt")
#' # Start by checking if your sample is normally distributed and isotropic.
#' generateCheckReport(anotherAFMImage)
#' # If your sample is normally distributed and isotropic, generate a full report
#' generateReport(anotherAFMImage)
#' }
generateCheckReport <- function(AFMImage) {
  sampleName<-basename(AFMImage@fullfilename)
  sampleDirectory<-dirname(AFMImage@fullfilename)
  print(paste("Generating a check report for", sampleName, "in", sampleDirectory))
  reportDirectory<-paste(sampleDirectory, "outputs", sep="/")
  createReportDirectory(reportDirectory)

  AFMImageAnalyser<-new("AFMImageAnalyser", AFMImage= AFMImage, fullfilename = AFMImage@fullfilename)
  AFMImageAnalyser<-checkIsotropy(AFMImage,AFMImageAnalyser)
  putAnalysisOnDisk(AFMImageAnalyser, AFMImage)

  # sampleName<-basename(AFMImage@fullfilename)
  # rdata_directoryfiles<-list.files(reportDirectory,
  #                                  include.dirs = FALSE, recursive = FALSE, full.names = TRUE,
  #                                  pattern = paste(sampleName,"AFMImageAnalyser.rda$",sep="-"))
  # if (length(rdata_directoryfiles)>0) {
  reportFullfilename<-paste(reportDirectory, paste(sampleName,"checkreport.pdf",sep="-"),sep="/")
  generateAFMImageReport(AFMImageAnalyser, reportFullfilename, isCheckReport = TRUE)
  # }else{
  #   print("analysis not found...")
  #   print(paste(sampleName,"AFMImageAnalyser.rda",sep="-"))
  # }
  print("done")
}

#' @title Generate an analysis report from an AFMImageAnalyser object
#'
#' @description \code{generateAFMImageReport} generates a report from an AFMImageAnalyser object
#'
#' @param AFMImageAnalyser an \code{\link{AFMImageAnalyser}} to be used to produce the report
#' @param reportFullfilename location on disk where to save the generated report
#' @param isCheckReport TRUE to generate a check report, FALSE to generate a full report
#' @author M.Beauvais
#' @export
generateAFMImageReport<-function(AFMImageAnalyser, reportFullfilename, isCheckReport){
  numberOfModelsPerPage=3
  if (missing(isCheckReport)) {
    isCheckReport = TRUE
  }
  AFMImage<-AFMImageAnalyser@AFMImage
  # # load AFMImageAnalyser in rda format
  # print(paste("loading", basename(oneAFMImageAnalyserFile)))
  #
  # x = load(file = oneAFMImageAnalyserFile)
  # AFMImageAnalyser= get(x)
  # rm(x)
  #
  fullfilename <- AFMImageAnalyser@AFMImage@fullfilename
  sampleName<-basename(AFMImageAnalyser@AFMImage@fullfilename)
  reportDirectory=dirname(AFMImageAnalyser@fullfilename)
  createReportDirectory(reportDirectory)
  # # load image in rda format
  # afmImageFullfilename<-paste(dirname(oneAFMImageAnalyserFile) ,paste(sampleName, "AFMImage.rda", sep="-"),sep="/")
  # print(paste("loading", basename(afmImageFullfilename)))
  # x = load(file = afmImageFullfilename)
  # AFMImage= get(x)
  # rm(x)
  #
  # print(paste("processing image", sampleName))

  # save all images necessary for the report on disk
  putImagesFromAnalysisOnDisk(AFMImageAnalyser, AFMImage, reportDirectory)

  print(paste("creating", basename(reportFullfilename), "..."))
  pdf(reportFullfilename, width=8.27, height=11.69)

  # first page
  rglImagefullfilename<-get3DImageFullfilename(reportDirectory, sampleName)
  print(paste("reading", basename(rglImagefullfilename), "..."))
  img <- readPNG(rglImagefullfilename)

  roughnesses<-getRoughnessParameters(AFMImageAnalyser@AFMImage)
  basicImageInfo<-data.table(name=c("Scan size", "Samples per line", "Lines", "Total Rrms", "Ra (mean roughness)"),
                             values=c(paste(AFMImageAnalyser@AFMImage@scansize,"nm"),
                                      paste(as.character(AFMImageAnalyser@AFMImage@samplesperline)),
                                      paste(as.character(AFMImageAnalyser@AFMImage@lines)),
                                      paste(round(roughnesses$totalRMSRoughness_TotalRrms, digits=4),"nm"),
                                      paste(round(roughnesses$MeanRoughness_Ra, digits=4),"nm")))
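  # first report page: a 5x4 grid with the sample name on top (row 1),
  # the 3D snapshot in rows 2-3 and the basic information table in rows 4-5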
imageInformationDTPlot<-getGgplotFromDataTable(basicImageInfo, removeRowNames= TRUE, removeColNames=TRUE) grid.newpage() # Open a new page on grid device pushViewport(viewport(layout = grid.layout(5, 4))) vp1<-viewport(layout.pos.row = 2:3, layout.pos.col = 1:4) grid.raster(img,vp=vp1) vp0<-viewport(layout.pos.row = 1, layout.pos.col = 2:3) grid.text(sampleName, vp=vp0, gp=gpar(fontsize=20, col="black")) vp2<-viewport(layout.pos.row = 4:5, layout.pos.col = 1:4) print(imageInformationDTPlot,vp=vp2) # page for checking # normality / omni direction of samples if (!length(AFMImageAnalyser@variogramAnalysis@directionalVariograms)==0) { exportpng2FullFilename<-getDirectionalVarioPngFullfilename(reportDirectory, sampleName) print(paste("reading",basename(exportpng2FullFilename))) directionalVariograms<-readPNG(exportpng2FullFilename) grid.newpage() # Open a new page on grid device pushViewport(viewport(layout = grid.layout(4, 4))) qq <- checkNormalityQQ(AFMImage) m <- checkNormalityDensity(AFMImage) vp2<- viewport(layout.pos.row = 1:2, layout.pos.col = 1:2) print(qq, vp = vp2) vp3<-viewport(layout.pos.row = 1:2, layout.pos.col = 3:4) print(m, vp = vp3) vp4<-viewport(layout.pos.row = 3:4, layout.pos.col = 1:4) grid.raster(directionalVariograms,vp=vp4) } if (!isCheckReport) { # get variogram model evaluation if (!length(AFMImageAnalyser@variogramAnalysis@variogramModels)==0) { mergedDT<-getDTModelEvaluation(AFMImageAnalyser@variogramAnalysis) print(mergedDT) sillrangeDT<-getDTModelSillRange(AFMImageAnalyser@variogramAnalysis) setkey(sillrangeDT, "model") name<-press<-NULL sampleDT <- mergedDT[name==basename(AFMImageAnalyser@AFMImage@fullfilename)] setkey(sampleDT, "model") #sampleDT <- sampleDT[cor>0.98] sampleDT<-merge(sampleDT, sillrangeDT, by="model") sampleDT<-sampleDT[,name:=NULL] sampleDT <- unique(sampleDT) sampleDT <- sampleDT[order(-rank(cor), rank(press))] print(basename(AFMImageAnalyser@AFMImage@fullfilename)) print(basename(AFMImageAnalyser@fullfilename)) print(sampleDT) summarySampleDT<-copy(sampleDT) summarySampleDT$press<-round(sampleDT$press) summarySampleDT$sill<-round(sampleDT$sill) summarySampleDT$range<-round(sampleDT$range) print("plotting variogram table...") existsVariogramModel<-TRUE if (nrow(sampleDT)!=0) { plotBestVariogramModelsTable<-getGgplotFromDataTable(summarySampleDT, removeRowNames=TRUE, removeColNames=FALSE) }else{ print("no good variogram table...") existsVariogramModel<-FALSE sampleDT <- mergedDT[name==basename(fullfilename)] sampleDT <- unique(sampleDT) sampleDT <- sampleDT[order(-rank(cor), rank(press))] plotBestVariogramModelsTable<-getGgplotFromDataTable(summarySampleDT, removeRowNames=TRUE, removeColNames=FALSE) } #print(plotBestVariogramModelsTable) } # best variogram models page if (!length(AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram)==0) { # chosen sample grid.newpage() # Open a new page on grid device pushViewport(viewport(layout = grid.layout(7, 2))) sampleSpplotfullfilename<-getSpplotImagefullfilename(reportDirectory, sampleName) print(paste("reading",basename(sampleSpplotfullfilename))) sampleImg <- readPNG(sampleSpplotfullfilename) sampleSpplotfullfilename<-getVarioPngchosenFitSample(reportDirectory, sampleName) print(paste("reading",basename(sampleSpplotfullfilename))) chosenFitSampleImg <- readPNG(sampleSpplotfullfilename) vp0<- viewport(layout.pos.row = 1, layout.pos.col = 1:2) grid.text("Variogram analysis", vp=vp0, gp=gpar(fontsize=20, col="black")) vp1<- viewport(layout.pos.row = 2:3, layout.pos.col = 1) 
grid.raster(sampleImg,vp=vp1) #vp3<-viewport(layout.pos.row = 9, layout.pos.col = 1) #grid.text("Original", vp=vp3, gp=gpar(fontsize=10, col="black")) vp2<- viewport(layout.pos.row = 2:3, layout.pos.col = 2) grid.raster(chosenFitSampleImg,vp=vp2) #vp4<-viewport(layout.pos.row = 9, layout.pos.col = 2) #grid.text("Sample", vp=vp4, gp=gpar(fontsize=10, col="black")) totalVariogramModels=length(AFMImageAnalyser@variogramAnalysis@variogramModels) #print(totalVariogramModels) if (totalVariogramModels>0) { vp5<-viewport(layout.pos.row = 4:7, layout.pos.col = 1:2) print(plotBestVariogramModelsTable,vp=vp5) printVariogramModelEvaluations(AFMImageAnalyser, sampleDT, numberOfModelsPerPage) } } # Roughness against length scale if (!length(AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale)==0) { grid.newpage() # Open a new page on grid device pushViewport(viewport(layout = grid.layout(7, 2))) vp0<-viewport(layout.pos.row = 1, layout.pos.col = 1:2) grid.text("Roughness vs. lengthscale", vp=vp0, gp=gpar(fontsize=20, col="black")) exportCsvFullFilename<-getRoughnessAgainstLengthscale(reportDirectory, sampleName) print(paste("reading",basename(exportCsvFullFilename))) samplePredictedImg <- readPNG(exportCsvFullFilename) vp1<-viewport(layout.pos.row = 2:4, layout.pos.col = 1) grid.raster(samplePredictedImg,vp=vp1) exportCsvFullFilename<-getRoughnessAgainstLengthscale10nm(reportDirectory, sampleName) print(paste("reading",basename(exportCsvFullFilename))) samplePredictedImg <- readPNG(exportCsvFullFilename) vp1<-viewport(layout.pos.row = 2:4, layout.pos.col = 2) grid.raster(samplePredictedImg,vp=vp1) for(i in c(0,1)) { exportpng2FullFilename=getRoughnessAgainstLengthscaleIntersection(reportDirectory, paste(sampleName,i*2,sep="-")) if (file.exists(exportpng2FullFilename)) { print("intersection inserted...") img<-readPNG(exportpng2FullFilename) vp2<-viewport(layout.pos.row = 5:7, layout.pos.col = i+1) grid.raster(img,vp=vp2) } } } # export fractal dimension if (!length(AFMImageAnalyser@fdAnalysis@fractalDimensionMethods)==0) { grid.newpage() # Open a new page on grid device pushViewport(viewport(layout = grid.layout(7, 4))) vp0<-viewport(layout.pos.row = 1, layout.pos.col = 1:4) grid.text("Fractal dimension analysis", vp=vp0, gp=gpar(fontsize=20, col="black")) n=length(AFMImageAnalyser@fdAnalysis@fractalDimensionMethods) print(n) if (n>0) { sampleDT <- data.table( fd_method= c(sapply(1:n, function(i) AFMImageAnalyser@fdAnalysis@fractalDimensionMethods[[i]]@fd_method)), fd= c(sapply(1:n, function(i) AFMImageAnalyser@fdAnalysis@fractalDimensionMethods[[i]]@fd)), fd_scale= c(sapply(1:n, function(i) AFMImageAnalyser@fdAnalysis@fractalDimensionMethods[[i]]@fd_scale))) # sampleDT <- data.table( AFMImageAnalyser@fdAnalysis@fractalDimensionMethods) # setnames(sampleDT,c("method","scale"),c("fd_method","fd_scale")) print(sampleDT) #sampleDT <- sampleDT[,c(2,13,14,15), with = FALSE] setkey(sampleDT, "fd_method") sampleDT <- unique(sampleDT) name<-NULL plotFractalDimensionTable<-getGgplotFromDataTable(sampleDT[, name:=NULL]) vp3<-viewport(layout.pos.row = 2:3, layout.pos.col = 1:4) print(plotFractalDimensionTable,vp=vp3) exportpng2FullFilename=getFractalDimensionsPngFullfilename(reportDirectory, sampleName, "isotropic") if (file.exists(exportpng2FullFilename)) { img<-readPNG(exportpng2FullFilename) vp4<-viewport(layout.pos.row = 4:5, layout.pos.col = 1:2) grid.raster(img,vp=vp4) } exportpng2FullFilename=getFractalDimensionsPngFullfilename(reportDirectory, sampleName, "squareincr") if 
(file.exists(exportpng2FullFilename)) {
          img<-readPNG(exportpng2FullFilename)
          vp5<-viewport(layout.pos.row = 4:5, layout.pos.col = 3:4)
          grid.raster(img,vp=vp5)
        }
        exportpng2FullFilename=getFractalDimensionsPngFullfilename(reportDirectory, sampleName, "filter1")
        if (file.exists(exportpng2FullFilename)) {
          img<-readPNG(exportpng2FullFilename)
          vp6<-viewport(layout.pos.row = 6:7, layout.pos.col = 1:2)
          grid.raster(img,vp=vp6)
        }
      }
    }
  }
  dev.off()
  rm(AFMImageAnalyser)
}

#' @title printVariogramModelEvaluations
#'
#' @description \code{printVariogramModelEvaluations} generates a graphic element containing the evaluation of all variogram models
#'
#' @param AFMImageAnalyser an \code{\link{AFMImageAnalyser}} to be used to produce report
#' @param numberOfModelsPerPage numeric to specify the number of model evaluations per page
#' @param sampleDT a data.table containing the evaluation information
#' @author M.Beauvais
#' @export
printVariogramModelEvaluations<-function(AFMImageAnalyser, sampleDT, numberOfModelsPerPage){
  error<-predicted<-realH<-nbPointsPercent<-numberOfPoints<-NULL
  #####################
  # new page for experimental variogram and models
  experimentalVariogramDT<-AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram
  experimentalVariogramDT$name<-"Experimental"
  #drops <- c("dir.hor","dir.ver","id","np")
  experimentalVariogramDT<-experimentalVariogramDT[,c("dir.hor","dir.ver","id","np"):=NULL]
  #names(experimentalVariogramDT)
  sampleName<-basename(AFMImageAnalyser@AFMImage@fullfilename)
  sampleSpplotfullfilename<-getSpplotImagefullfilename(tempdir(), sampleName)
  saveSpplotFromAFMImage(AFMImageAnalyser@AFMImage, sampleSpplotfullfilename, withoutLegend=TRUE)
  #print(paste("reading",basename(sampleSpplotfullfilename)))
  #sampleImg<-getSpplotFromAFMImage(AFMImage=AFMImageAnalyser@AFMImage, expectedWidth=80, expectHeight=60, withoutLegend=TRUE)
  sampleImg <- readPNG(sampleSpplotfullfilename)

  allVarioModels<-str_sub(sampleDT$model,-3)
  i<-1
  for (i in seq(1:length(allVarioModels))) {
    indexInPage<-i%%numberOfModelsPerPage
    if (indexInPage==1) {
      # Open a new page
      grid.newpage()
      pushViewport(viewport(layout = grid.layout(numberOfModelsPerPage*2, 3)))
    }
    if (indexInPage==0) indexInPage=numberOfModelsPerPage
    #print(indexInPage)
    # plot experimental variogram and model variogram
    vp1<-viewport(layout.pos.row = (indexInPage-1)*2+1, layout.pos.col = 2)
    grid.raster(sampleImg,vp=vp1)
    # pushViewport(vp1)
    # print(sampleImg,newpage=FALSE)
    # popViewport(1)
    #print(i)
    allVariogramModelEvaluation<-AFMImageAnalyser@variogramAnalysis@variogramModels
    for (j in seq(1:length(allVariogramModelEvaluation))) {
      if (allVariogramModelEvaluation[j][[1]]@fit.v[2]$model==allVarioModels[i]) break;
    }
    #print(j)
    #print(allVariogramModelEvaluation[j][[1]]@fit.v[2]$model)
    #predictedfullfilename<-getSpplotPredictedImageFullfilename(reportDirectory, sampleName, allVarioModels[i])
    modelName<-allVariogramModelEvaluation[j][[1]]@fit.v[2]$model
    part_valid_pr<-AFMImageAnalyser@variogramAnalysis@variogramModels[[i]]@mykrige
    cuts<-AFMImageAnalyser@variogramAnalysis@cuts
    withoutLegend<-TRUE
    colLimit<-length(cuts)+3
    cols <- getSpplotColors(colLimit)

    # density plot for percent error (over total amplitude)
    amplitudeReal<-abs(max(AFMImageAnalyser@AFMImage@data$h)-min(AFMImageAnalyser@AFMImage@data$h))
    statsHeightsDT<-data.table(realH=AFMImageAnalyser@AFMImage@data$h,
                               predicted = as.vector(unlist(part_valid_pr["var1.pred"]@data)))
    statsHeightsDT[,error:=(predicted-realH)/amplitudeReal,]
    resStatsDT<-data.table(step=c(0), numberOfPoints=c(0))
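    # The loop below summarises prediction quality: for 20 evenly spaced
    # thresholds between 0 and the maximum relative error, it counts how many
    # predicted heights fall within that error of the measured surface (errors
    # are normalised by the total height amplitude), then turns the counts
    # into a share of all points.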
    totalPoints<-length(statsHeightsDT$error)
    # use a dedicated loop variable so the outer model index i is preserved
    for (errStep in seq(0,max(statsHeightsDT$error), by=(max(statsHeightsDT$error)/20))) {
      nbPoints<-length(statsHeightsDT$error[abs(statsHeightsDT$error)<errStep])
      resStatsDT<-rbind(resStatsDT, data.table(step=c(errStep), numberOfPoints=c(nbPoints)))
    }
    resStatsDT<-resStatsDT[-c(1,2),]
    resStatsDT[, nbPointsPercent:=numberOfPoints/totalPoints,]
    errorData<-data.frame(error=statsHeightsDT$error)

    predictedspplotfullfilename<-getSpplotPredictedImageFullfilename(tempdir(), sampleName, modelName)
    saveSpplotFromKrige(predictedspplotfullfilename, modelName, part_valid_pr,cuts, withoutLegend = TRUE)
    # TODO save on disk as png and read
    # read image on disk
    #print(paste("reading", basename(predictedspplotfullfilename)))
    samplePredictedImg <- readPNG(predictedspplotfullfilename)
    #samplePredictedImg<-spplot(part_valid_pr["var1.pred"], cuts=cuts, col.regions=cols,key=list(lines=FALSE, col="transparent"))
    vp2<-viewport(layout.pos.row = (indexInPage-1)*2+1, layout.pos.col = 3)
    grid.raster(samplePredictedImg,vp=vp2)
    # pushViewport(vp2)
    # print(samplePredictedImg,newpage=FALSE)
    # popViewport(1)

    ang1<-ang2<-ang3<-anis1<-anis2<-name<-NULL
    fit.v<-allVariogramModelEvaluation[j][[1]]@fit.v
    vgm1<-vgm(fit.v[2]$psill, fit.v[2]$model, fit.v[2]$range, kappa = fit.v[2]$kappa,
              anis = c(fit.v[2]$anis1, fit.v[2]$anis2),
              add.to = vgm(fit.v[1]$psill, fit.v[1]$model, fit.v[1]$range, kappa = fit.v[1]$kappa,
                           anis = c(fit.v[1]$anis1, fit.v[1]$anis2)))
    newModelDT<-data.table(vgm1)
    setnames(newModelDT, "psill", "sill" )
    newModelDT<-rbind(newModelDT, sampleDT[i], fill=TRUE)
    newModelDT<- newModelDT[, ang1:=NULL]
    newModelDT<- newModelDT[, ang2:=NULL]
    newModelDT<- newModelDT[, ang3:=NULL]
    newModelDT<- newModelDT[, anis1:=NULL]
    newModelDT<- newModelDT[, anis2:=NULL]
    plotVariogramModelTable<-getGgplotFromDataTable(newModelDT[,name:=NULL])
    vp4<-viewport(layout.pos.row = (indexInPage-1)*2+1+1, layout.pos.col = 2:3)
    print(vp=vp4, plotVariogramModelTable, row.names= FALSE, include.rownames=FALSE)

    # variogram from model
    myvgm<-experimentalVariogramDT
    experimentalVariogramDTnrow=nrow(myvgm)
    class(myvgm) = c("gstatVariogram", "data.frame")
    myvgm$np=rep(1,experimentalVariogramDTnrow)
    myvgm$dir.hor=rep(0,experimentalVariogramDTnrow)
    myvgm$dir.ver=rep(0,experimentalVariogramDTnrow)
    myvgm$id=rep(factor("var1"),experimentalVariogramDTnrow)
    begin<-(indexInPage-1)*2+1
    vp3<-viewport(layout.pos.row = begin:(begin+1), layout.pos.col = 1, width=100, height=100)
    vgLine <- rbind(cbind(variogramLine(vgm1, maxdist = max(myvgm$dist)), id = "Raw"))
    p1<-ggplot(myvgm, aes(x = dist, y = gamma, colour = id)) + geom_line(data = vgLine) + geom_point()
    p1 <- p1 + ylab("semivariance")
    p1 <- p1 + xlab("distance (nm)")
    p1 <- p1 + ggtitle("Semivariogram")
    p1 <- p1 + guides(colour=FALSE)
    p1 <- p1 + expand_limits(y = 0)
    print(p1,vp=vp3)

    grid.newpage()
    vp5<-viewport(layout.pos.row = begin:(begin+2), layout.pos.col = 1, width=200, height=200)
    # density plot of the relative prediction errors
    p1 <- ggplot(errorData,aes(error, fill =c(1))) + geom_density(alpha = 0.2) + guides(fill=FALSE) + theme(legend.position="none")
    # p1 <- p1 + ylab("semivariance")
    # p1 <- p1 + xlab("distance (nm)")
    # p1 <- p1 + ggtitle("Semivariogram")
    # p1 <- p1 + guides(colour=FALSE)
    # p1 <- p1 + expand_limits(y = 0)
    print(p1,vp=vp5)
    plotVariogramModelTable<-getGgplotFromDataTable(resStatsDT)
    vp6<-viewport(layout.pos.row = (indexInPage-1)*2+1+1, layout.pos.col = 2:3)
    print(vp=vp6, plotVariogramModelTable, row.names=
FALSE, include.rownames=FALSE)
  }
}

#' Put the images from all analysis on disk
#'
#' A function to put on disk all the images from the variogram and PSD analysis of an \code{\link{AFMImage}}.
#' An AFM Image 3D representation is saved on disk thanks to the \code{\link{rgl}} package.
#' On Unix systems, an X server connection is necessary to be able to use the \code{\link{rgl}} package.
#'
#' @param AFMImageAnalyser an \code{\link{AFMImageAnalyser}}
#' @param AFMImage an \code{\link{AFMImage}}
#' @param exportDirectory where the images will be stored
#' @export
#' @author M.Beauvais
putImagesFromAnalysisOnDisk<-function(AFMImageAnalyser, AFMImage, exportDirectory) {
  exportFractalDimImagesForReport(AFMImage, exportDirectory)
  exportPSDImagesForReport(AFMImageAnalyser, AFMImage, exportDirectory)
  exportVariogramImagesForReport(AFMImageAnalyser, AFMImage, exportDirectory)
  export3DImageForReport(AFMImage, exportDirectory)
}

exportVariogramImagesForReport<- function(AFMImageAnalyser, AFMImage, exportDirectory) {
  class(AFMImageAnalyser)="AFMImageAnalyser"
  class(AFMImageAnalyser@variogramAnalysis)="AFMImageVariogramAnalysis"
  sampleName<-basename(AFMImage@fullfilename)

  # spplot of real sample for comparison with predicted sample from each variogram model
  spplotImagefullfilename<-getSpplotImagefullfilename(exportDirectory, sampleName)
  saveSpplotFromAFMImage(AFMImage, spplotImagefullfilename, withoutLegend=TRUE)

  # directional variograms files
  if (!length(AFMImageAnalyser@variogramAnalysis@directionalVariograms)==0) {
    exportCsvFullFilename<-getDirectionalVarioCsvFullfilename(exportDirectory, sampleName)
    print(paste("saving", basename(exportCsvFullFilename)))
    tryCatch({
      write.table(AFMImageAnalyser@variogramAnalysis@directionalVariograms, exportCsvFullFilename, sep=",")
    }, error = function(e){
      print(paste("error", e))
    })
    dvarios<-AFMImageAnalyser@variogramAnalysis@directionalVariograms
    dist<-gamma<-dir.hor<-NULL
    p1 <- ggplot(dvarios, aes(x=dist, y=gamma, color=as.factor(dir.hor), shape=as.factor(dir.hor)))
    p1 <- p1 + geom_point()
    p1 <- p1 + ylab("semivariance")
    p1 <- p1 + xlab("distance (nm)")
    p1 <- p1 + ggtitle("Semivariogram")
    p1 <- p1 + expand_limits(y = 0)
    p1 <- p1 + guides(colour=FALSE)
    #print(p1)
    exportpng2FullFilename<-getDirectionalVarioPngFullfilename(exportDirectory, sampleName)
    print(paste("saving", basename(exportpng2FullFilename)))
    png(filename=exportpng2FullFilename, units = "px", width=800, height=800)
    print(p1)
    dev.off()
  }
  # omnidirectional variogram files
  if
(!length(AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram)==0) { exportCsvFullFilename<-getOmnidirectionalVarioCsvFullfilename(exportDirectory, sampleName) print(paste("saving", basename(exportCsvFullFilename))) AFMImageVariogram<-AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram class(AFMImageVariogram)=c("gstatVariogram","data.frame") tryCatch({ write.table(AFMImageVariogram, exportCsvFullFilename, sep=",") }, error = function(e){ print("error",e) }) myvgm<-AFMImageVariogram experimentalVariogramDTnrow=nrow(myvgm) class(myvgm) = c("gstatVariogram", "data.frame") myvgm$np=rep(1,experimentalVariogramDTnrow) myvgm$dir.hor=rep(0,experimentalVariogramDTnrow) myvgm$dir.ver=rep(0,experimentalVariogramDTnrow) myvgm$id=rep(factor("var1"),experimentalVariogramDTnrow) dist<-gamma<-id<-NULL p1<-ggplot(myvgm, aes(x = dist, y = gamma, colour = id)) + geom_point() p1 <- p1 + ylab("semivariance") p1 <- p1 + xlab("distance (nm)") p1 <- p1 + ggtitle("Semivariogram") p1 <- p1 + expand_limits(y = 0) p1 <- p1 + guides(colour=FALSE) exportpng2FullFilename<-getOmnidirectionalVarioPngFullfilename(exportDirectory, sampleName) print(paste("saving", basename(exportpng2FullFilename))) png(filename=exportpng2FullFilename, units = "px", width=800, height=800) print(p1) dev.off() # chosen sample plot TheData<-as.data.frame(AFMImage@data) TheData=na.omit(TheData) part_model <- TheData[AFMImageAnalyser@variogramAnalysis@chosenFitSample, ] coordinates(part_model) = ~x+y proj4string(part_model)=CRS("+init") is.projected(part_model) pchosenFitSample<-spplot(part_model, col.regions="black",contour=TRUE,key=list(lines=FALSE, col="transparent")) expectedWidth = 400 expectHeight = 300 exportpngFullFilename<-getVarioPngchosenFitSample(exportDirectory, sampleName) print(paste("saving", basename(exportpngFullFilename))) png(filename=exportpngFullFilename, units = "px", width=expectedWidth, height=expectHeight) print(pchosenFitSample) dev.off() # save images from variogram modeling totalVariogramModels=length(AFMImageAnalyser@variogramAnalysis@variogramModels) #print(totalVariogramModels) if (totalVariogramModels>0) { fullfilename<-AFMImage@fullfilename cuts<-AFMImageAnalyser@variogramAnalysis@cuts for (i in seq(1,totalVariogramModels)) { #print(AFMImageAnalyser@variogramAnalysis@variogramModels[[i]]@res) testedModel<-AFMImageAnalyser@variogramAnalysis@variogramModels[[i]]@model print(testedModel) if (testedModel=="Wav2") { vgm<-vgm( 5, "Exp", 1, add.to = vgm(5, "Wav", 1, nugget = 2.5)) }else{ vgm<-vgm(5,testedModel,1,0) } mykrige<-AFMImageAnalyser@variogramAnalysis@variogramModels[[i]]@mykrige predictedspplotfullfilename<-getSpplotPredictedImageFullfilename(exportDirectory, sampleName, testedModel) saveSpplotFromKrige(predictedspplotfullfilename, vgm,mykrige,cuts, withoutLegend = TRUE) predictedAFMImage<-getAFMImageFromKrige(AFMImage, vgm, mykrige) class(predictedAFMImage) = c("AFMImage") #displayIn3D(predictedAFMImage,1024, full2Dfilename,noLight=TRUE)) export3DImageForReport(predictedAFMImage, exportDirectory) } } } } exportPSDImagesForReport<-function(AFMImageAnalyser, AFMImage, exportDirectory) { #class(AFMImageAnalyser)="AFMImageAnalyser" #class(AFMImageAnalyser@psdAnalysis)="AFMImagePSDAnalysis" filename<-basename(AFMImage@fullfilename) # export Roughness against lengthscale graph if (!length(AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale)==0) { data<-AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale r<-roughness<-NULL p1 <- ggplot(data, aes(x=r, y=roughness, colour= 
basename(filename))) p1 <- p1 + geom_point() p1 <- p1 + geom_line() p1 <- p1 + ylab("roughness (nm)") p1 <- p1 + xlab("lengthscale (nm)") p1 <- p1 + guides(colour=FALSE) aIntercept<-AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis2@yintersept aSlope<-AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis2@slope if (length(AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis2)!=0){ p1 <- p1 + geom_abline(intercept = aIntercept, slope = aSlope, size=1.2) } print(paste(aIntercept, aSlope)) pngFilename=paste(filename,"roughness-against-lengthscale.png",sep="-") exportpngFullFilename<-paste(exportDirectory, pngFilename, sep="/") print(paste("saving", basename(exportpngFullFilename))) png(filename=exportpngFullFilename, units = "px", width=800, height=800) print(p1) dev.off() # focus on the first 10nm newdata<-data[r<10,] r<-roughness<-NULL p1 <- ggplot(newdata, aes(x=r, y=roughness, colour= basename(filename))) p1 <- p1 + geom_point() p1 <- p1 + geom_line() p1 <- p1 + ylab("roughness (nm)") p1 <- p1 + xlab("lengthscale (nm)") p1 <- p1 + guides(colour=FALSE) # if (length(AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis1)!=0){ # p1 <- p1 + geom_abline(intercept = AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis1@yintersept, # slope = AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis1@slope, # size=1.2) # } pngFilename=paste(filename,"roughness-against-lengthscale-10nm.png",sep="-") exportpngFullFilename<-paste(exportDirectory, pngFilename, sep="/") print(paste("saving", basename(exportpngFullFilename))) png(filename=exportpngFullFilename, units = "px", width=800, height=800) print(p1) dev.off() # save intersections images if (!length(AFMImageAnalyser@psdAnalysis@intersections)==0) { saveOnDiskIntersectionForRoughnessAgainstLengthscale(AFMImageAnalyser, exportDirectory) } } } getGgplotFromDataTable<-function(DT, removeRowNames, removeColNames) { if (missing(removeRowNames)) removeRowNames<-TRUE if (missing(removeColNames)) removeColNames<-FALSE mytheme <- gridExtra::ttheme_default( core = list(fg_params=list(cex = 0.8)), colhead = list(fg_params=list(cex = 0.9)), rowhead = list(fg_params=list(cex = 0.9))) qplotFromDataTable<- qplot(1:15, 1:15, geom = "blank") + theme_bw() + theme(line = element_blank(), text = element_blank()) if ((removeRowNames)&&(removeColNames)) { qplotFromDataTable<- qplotFromDataTable + annotation_custom(grob = tableGrob(DT, theme = mytheme, rows = NULL, cols=NULL)) }else{ if (removeRowNames) { qplotFromDataTable<- qplotFromDataTable + annotation_custom(grob = tableGrob(DT, theme = mytheme, rows = NULL)) }else{ if (removeColNames) { qplotFromDataTable<- qplotFromDataTable + annotation_custom(grob = tableGrob(DT, theme = mytheme, cols = NULL)) }else{ qplotFromDataTable<- qplotFromDataTable + annotation_custom(grob = tableGrob(DT, theme = mytheme)) } } } return(qplotFromDataTable) } export3DImageForReport<-function(AFMImage, exportDirectory, noLight) { library(AFM) library(rgl) sampleName<-basename(AFMImage@fullfilename) rglImagefullfilename<-get3DImageFullfilename(exportDirectory, sampleName) if (displayIn3D(AFMImage, width=1024, fullfilename=rglImagefullfilename,changeViewpoint=TRUE, noLight= noLight)) { rgl.viewpoint(zoom=2) rgl.close() } } export3DImageForReport<-function(AFMImage, exportDirectory) { sampleName<-basename(AFMImage@fullfilename) rglImagefullfilename<-get3DImageFullfilename(exportDirectory, sampleName) if (displayIn3D(AFMImage, width=1024, fullfilename=rglImagefullfilename,noLight=FALSE)) { rgl.close() } } 
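## Minimal usage sketch for putImagesFromAnalysisOnDisk() (illustrative only,
## not part of the package API; it uses the sample image shipped with the
## package and tempdir() as a stand-in export directory; on Unix an X server
## is needed for the rgl 3D snapshot, as documented above):
if (FALSE) {
  library(AFM)
  data("AFMImageOfRegularPeaks")
  img <- AFMImageOfRegularPeaks
  img@fullfilename <- paste(tempdir(), "AFMImageOfRegularPeaks.txt", sep = "/")
  # analyse() fills the variogram, PSD and fractal dimension slots consumed above
  analyser <- analyse(new("AFMImageAnalyser", AFMImage = img,
                          fullfilename = img@fullfilename))
  putImagesFromAnalysisOnDisk(analyser, img, tempdir())
}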
createReportDirectory<-function(reportDirectory) {
  if (!file.exists(reportDirectory)){
    print(paste("creating report directory",reportDirectory))
    dir.create(file.path(reportDirectory), showWarnings = FALSE)
  }
  if (!isReportDirectoryWritePermissionCorrect(reportDirectory)) {
    stop(paste("Error: can't write to output directory", reportDirectory))
  }
  print(paste("report directory is", reportDirectory))
}

isReportDirectoryWritePermissionCorrect<-function(reportDirectory) {
  fileConn <- NULL
  tryCatch({
    fullfilename=paste(reportDirectory, "permCheck.txt", sep="/")
    fileConn<-file(fullfilename)
    writeLines(c("Hello","World"), fileConn)
    close(fileConn)
    fileConn <- NULL
    file.remove(fullfilename)
    return(TRUE)
  }, error = function(e){
    # close the connection only if it was actually created; closing an
    # undefined connection would itself raise an error inside the handler
    if (!is.null(fileConn)) try(close(fileConn), silent = TRUE)
    return(FALSE)
  })
}
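## Sketch of the directory bootstrap used by both report generators
## (illustrative only; the "outputs" subdirectory name mirrors the code above):
if (FALSE) {
  reportDirectory <- file.path(tempdir(), "outputs")
  createReportDirectory(reportDirectory)   # creates the directory, then
                                           # verifies write permission
  stopifnot(file.exists(reportDirectory))
}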
## ---- end of file AFMReportMaker.R ----
require("pracma") require("data.table") require("gstat") require(sp) require("stringr") require(gridExtra) #require(reshape2) require(ggplot2) if(getRversion() >= "3.1.0") utils::suppressForeignCheck(c("h", "x", "y","TheData","chosenFitSample","predict.gstat")) #' @title AFM Image Variogram Model class #' #' @description \code{AFMImageVariogramModel}stores the evaluation of one experimental variogram model #' #' @slot model the variogram model name #' @slot fit.v the values from the \code{\link[gstat]{fit.variogram}} function in the gstat package #' @slot mykrige the values from the \code{\link[gstat]{krige}} function in the gstat library #' @slot res a data.table to store: (cor) the correlation between the predicted sample and the real sample (press) the sum of the square of the differences between real and predicted values for each point of the sample #' @slot cor to be removed ? #' @slot press to be removed ? #' @slot sill to be removed ? #' @slot imageFullfilename to be removed ? #' @name AFMImageVariogramModel-class #' @rdname AFMImageVariogramModel-class #' @exportClass AFMImageVariogramModel #' @author M.Beauvais AFMImageVariogramModel<-setClass("AFMImageVariogramModel", slots = c(model="character", fit.v="data.table", mykrige="SpatialPointsDataFrame", res="data.table", cor="numeric", press="numeric", sill="numeric", imageFullfilename="character") ) #' Constructor method of AFMImageVariogramModel Class. #' #' @param .Object an AFMImageVariogramModel object #' @param model the variogram model name #' @param fit.v the values from the \code{\link[gstat]{fit.variogram}} function in the gstat package #' @param mykrige the values from the \code{\link[gstat]{krige}} function in the gstat library #' @param res a data.table to store: (cor) the correlation between the predicted sample and the real sample (press) the sum of the square of the differences between real and predicted values for each point of the sample #' @param cor to be removed ? #' @param press to be removed ? #' @param sill to be removed ? #' @param imageFullfilename to be removed ? #' @rdname AFMImageVariogramModel-class #' @export setMethod("initialize", "AFMImageVariogramModel", function(.Object, model, fit.v=data.table(), mykrige, res=data.table(), cor, press, sill, imageFullfilename) { if(!missing(model)) .Object@model<- model #if(!missing(fit.v)) [email protected]<- fit.v if(!missing(mykrige)) .Object@mykrige<- mykrige #if(!missing(res)) .Object@res<- res if(!missing(cor)) .Object@cor<- cor if(!missing(press)) .Object@press<- press if(!missing(sill)) .Object@sill<- sill if(!missing(imageFullfilename)) .Object@imageFullfilename<- imageFullfilename validObject(.Object) return(.Object) }) #' Wrapper function AFMImageVariogramModel #' #' @rdname AFMImageVariogramModel-class #' @export AFMImageVariogramModel <- function() { return(new("AFMImageVariogramModel")) } #' @title AFM Image log-log experimental variogram slope analysis #' #' @description \code{omniVariogramSlopeAnalysis} stores the analysis of the second slope in roughness against lenghtscale #' #' @slot intersection_sill to be removed ? #' @slot sill to be removed ? #' @slot slope to be removed ? #' @slot yintersept to be removed ? 
#' @name omniVariogramSlopeAnalysis-class
#' @rdname omniVariogramSlopeAnalysis-class
#' @exportClass omniVariogramSlopeAnalysis
#' @author M.Beauvais
omniVariogramSlopeAnalysis<-setClass("omniVariogramSlopeAnalysis",
                                     slots = c(intersection_sill="numeric",
                                               sill="numeric",
                                               slope="numeric",
                                               yintersept="numeric",
                                               tangente_point1="numeric",
                                               tangente_point2="numeric"),
                                     validity = function(object) {
                                       return(TRUE)
                                     }
)

#' Constructor method of omniVariogramSlopeAnalysis Class.
#'
#' @param .Object an omniVariogramSlopeAnalysis object
#' @rdname omniVariogramSlopeAnalysis-class
#' @export
setMethod("initialize", "omniVariogramSlopeAnalysis", function(.Object) {
  .Object@intersection_sill<-0
  .Object@sill<-0
  .Object@slope<-0
  .Object@yintersept<-0
  .Object@tangente_point1<-0
  .Object@tangente_point2<-0
  validObject(.Object) ## validate the object
  return(.Object)
})

#' Wrapper function omniVariogramSlopeAnalysis
#'
#' @rdname omniVariogramSlopeAnalysis-class
#' @export
omniVariogramSlopeAnalysis <- function() {
  return(new("omniVariogramSlopeAnalysis"))
}

#' @title AFM image variogram analysis class
#'
#' @description \code{AFMImageVariogramAnalysis} manages the variogram analysis of an \code{\link{AFMImage}}
#'
#' @slot width (optional) a distance step for the calculation of the variograms
#' (e.g. width = ceiling(scan size / number of lines) = ceiling(1000/512) for AFMImageOfAluminiumInterface)
#' @slot omnidirectionalVariogram a data.table to store the omnidirectional variogram
#' @slot variogramSlopeAnalysis an \code{omniVariogramSlopeAnalysis} to analyse the slope in the log-log omnidirectional semivariogram
#' @slot directionalVariograms a data.table to store the directional variograms
#' @slot sampleFitPercentage a sample size as a percentage of random points in the \code{\link{AFMImage}}. These points will be used to fit the variogram models.
#' @slot chosenFitSample the chosen random points of the \code{\link{AFMImage}} to perform the fitting of the variogram models.
#' @slot cuts the cuts for spplot of the \code{\link{AFMImage}}. The same cuts will be used for the predicted \code{\link{AFMImage}}
#' @slot variogramModels A list of \code{\link{AFMImageVariogramModel}} containing the various evaluated variogram models.
#' @slot fullfilename to be removed ?
#' @slot updateProgress a function to update a graphical user interface
#' @name AFMImageVariogramAnalysis-class
#' @rdname AFMImageVariogramAnalysis-class
#' @exportClass AFMImageVariogramAnalysis
#' @author M.Beauvais
#'
AFMImageVariogramAnalysis<-setClass("AFMImageVariogramAnalysis",
                                    slots = c(
                                      width="numeric",
                                      sampleVariogramPercentage="numeric",
                                      omnidirectionalVariogram="data.table",
                                      variogramSlopeAnalysis="omniVariogramSlopeAnalysis",
                                      expectedSill="numeric",
                                      expectedRange="numeric",
                                      directionalVariograms="data.table",
                                      sampleFitPercentage="numeric",
                                      sampleValidatePercentage="numeric",
                                      chosenFitSample="numeric",
                                      cuts="numeric",
                                      variogramModels="list",
                                      fullfilename="character",
                                      updateProgress="function"),
                                    validity = function(object) {
                                      if (object@sampleFitPercentage > 1 ) {
                                        ## sample can't be more than 100%
                                        print("sample fit percentage can't be more than 100%")
                                        return(FALSE)
                                      } else return(TRUE)
                                    }
)

#' Constructor method of AFMImageVariogramAnalysis Class.
#'
#' @param .Object an AFMImageVariogramAnalysis class
#' @param sampleFitPercentage a sample size as a percentage (e.g. "5" for 5 percent) of random points in the \code{\link{AFMImage}}. These points will be used to fit the variogram models.
#' @param updateProgress a function to update a graphical user interface
#' @rdname AFMImageVariogramAnalysis-class
#' @export
setMethod("initialize", "AFMImageVariogramAnalysis", function(.Object, sampleFitPercentage, updateProgress) {
  if (!missing(updateProgress)) {
    .Object@updateProgress<-updateProgress
  }
  .Object@width<-0
  .Object@sampleFitPercentage <- sampleFitPercentage
  .Object@omnidirectionalVariogram<-data.table()
  .Object@directionalVariograms<-data.table()
  #.Object@updateProgress
  validObject(.Object) ## validate the object
  return(.Object)
})

#' Wrapper function AFMImageVariogramAnalysis
#'
#' @rdname AFMImageVariogramAnalysis-class
#' @export
AFMImageVariogramAnalysis <- function(sampleFitPercentage) {
  return(new("AFMImageVariogramAnalysis", sampleFitPercentage=sampleFitPercentage))
}

#' Method \code{variogramModels} returns a list of variogram model evaluation
#' @name AFMImageVariogramAnalysis-class
#' @rdname AFMImageVariogramAnalysis-class
#' @param object a \code{AFMImageVariogramAnalysis} object
setGeneric("variogramModels",function(object){standardGeneric("variogramModels")})
setGeneric(name= "variogramModels<-",
           def= function(AFMImageVariogramAnalysis, value) {
             return(standardGeneric("variogramModels<-"))
           })

#' @rdname AFMImageVariogramAnalysis-class
#' @aliases variogramModels
setMethod("variogramModels",signature=signature(object='AFMImageVariogramAnalysis'),
          function(object) {
            return(object@variogramModels)
          }
)
setReplaceMethod(f="variogramModels",
                 signature(AFMImageVariogramAnalysis = "AFMImageVariogramAnalysis", value = "list"),
                 definition= function(AFMImageVariogramAnalysis, value) {
                   if (is.null(value)) {
                     print("variogramModels is null")
                     return(AFMImageVariogramAnalysis)
                   }
                   AFMImageVariogramAnalysis@variogramModels <- value
                   return(AFMImageVariogramAnalysis)
                 })

#' Method \code{omnidirectionalVariogram} returns the omnidirectional variogram
#' @name AFMImageVariogramAnalysis-class
#' @rdname AFMImageVariogramAnalysis-class
setGeneric("omnidirectionalVariogram",function(object){standardGeneric("omnidirectionalVariogram")})
setGeneric(name= "omnidirectionalVariogram<-",
           def= function(AFMImageVariogramAnalysis, value) {
             return(standardGeneric("omnidirectionalVariogram<-"))
           })

#' @rdname AFMImageVariogramAnalysis-class
#' @aliases omnidirectionalVariogram
setMethod("omnidirectionalVariogram",signature=signature(object='AFMImageVariogramAnalysis'),
          function(object) {
            return(object@omnidirectionalVariogram)
          }
)
setReplaceMethod(f="omnidirectionalVariogram",
                 signature(AFMImageVariogramAnalysis = "AFMImageVariogramAnalysis", value = "data.table"),
                 definition= function(AFMImageVariogramAnalysis, value) {
                   AFMImageVariogramAnalysis@omnidirectionalVariogram <- value
                   return(AFMImageVariogramAnalysis)
                 })

#' Method \code{directionalVariograms} returns the directional variograms
#' @name AFMImageVariogramAnalysis-class
#' @rdname AFMImageVariogramAnalysis-class
setGeneric("directionalVariograms",function(object){standardGeneric("directionalVariograms")})
setGeneric(name= "directionalVariograms<-",
           def= function(AFMImageVariogramAnalysis, value) {
             return(standardGeneric("directionalVariograms<-"))
           })

#' @rdname AFMImageVariogramAnalysis-class
#' @aliases directionalVariograms
setMethod("directionalVariograms",signature=signature(object='AFMImageVariogramAnalysis'),
          function(object) {
            return(object@directionalVariograms)
          }
)
setReplaceMethod(f="directionalVariograms",
                 signature(AFMImageVariogramAnalysis = "AFMImageVariogramAnalysis", value = "data.table"),
                 definition=
                   function(AFMImageVariogramAnalysis, value) {
                     AFMImageVariogramAnalysis@directionalVariograms <- value
                     return(AFMImageVariogramAnalysis)
                   })

#' getDTModelEvaluation method
#'
#' @param AFMImageVariogramAnalysis an AFMImageVariogramAnalysis object
#' @rdname AFMImageVariogramAnalysis-getDTModelEvaluation-method
#' @exportMethod getDTModelEvaluation
setGeneric(name= "getDTModelEvaluation",
           def= function(AFMImageVariogramAnalysis) {
             return(standardGeneric("getDTModelEvaluation"))
           })

#' @name getDTModelEvaluation
#' @aliases getDTModelEvaluation getDTModelEvaluation,AFMImageVariogramAnalysis-method
#' @docType methods
#' @rdname AFMImageVariogramAnalysis-getDTModelEvaluation-method
#' @export
setMethod(f="getDTModelEvaluation", "AFMImageVariogramAnalysis",
          definition= function(AFMImageVariogramAnalysis) {
            res = data.table()
            totalL<-length(AFMImageVariogramAnalysis@variogramModels)
            if (totalL>0) {
              res = data.table()
              for (i in seq(1:totalL)){
                res=rbind(res, AFMImageVariogramAnalysis@variogramModels[[i]]@res)
              }
            }
            return(res)
          })

#' getDTModelSillRange method
#'
#' @param AFMImageVariogramAnalysis an AFMImageVariogramAnalysis object
#' @rdname AFMImageVariogramAnalysis-getDTModelSillRange-method
#' @exportMethod getDTModelSillRange
setGeneric(name= "getDTModelSillRange",
           def= function(AFMImageVariogramAnalysis) {
             return(standardGeneric("getDTModelSillRange"))
           })

#' @name getDTModelSillRange
#' @aliases getDTModelSillRange getDTModelSillRange,AFMImageVariogramAnalysis-method
#' @docType methods
#' @rdname AFMImageVariogramAnalysis-getDTModelSillRange-method
#' @export
setMethod(f="getDTModelSillRange", "AFMImageVariogramAnalysis",
          definition= function(AFMImageVariogramAnalysis) {
            res = data.table()
            totalL<-length(AFMImageVariogramAnalysis@variogramModels)
            if (totalL>0) {
              res = data.table()
              for (i in seq(1:totalL)){
                fit.v<-AFMImageVariogramAnalysis@variogramModels[[i]]@fit.v
                model<-paste(fit.v$model[1], fit.v$model[2],sep=", ")
                #print(model)
                value<-data.frame(model, sill=sum(fit.v$psill), range = sum(fit.v$range))
                res=rbind(res,value)
              }
            }
            return(res)
          })

#' Method \code{variogramSlopeAnalysis} returns the slope analysis on the log-log omnidirectional experimental semivariogram
#' @name AFMImageVariogramAnalysis-class
#' @rdname AFMImageVariogramAnalysis-class
setGeneric("variogramSlopeAnalysis",function(object){standardGeneric("variogramSlopeAnalysis")})
setGeneric(name= "variogramSlopeAnalysis<-",
           def= function(AFMImageVariogramAnalysis, value) {
             return(standardGeneric("variogramSlopeAnalysis<-"))
           })

#' @rdname AFMImageVariogramAnalysis-class
#' @aliases variogramSlopeAnalysis
setMethod("variogramSlopeAnalysis",signature=signature(object='AFMImageVariogramAnalysis'),
          function(object) {
            return(object@variogramSlopeAnalysis)
          }
)
setReplaceMethod(f="variogramSlopeAnalysis",
                 signature(AFMImageVariogramAnalysis = "AFMImageVariogramAnalysis", value = "omniVariogramSlopeAnalysis"),
                 definition= function(AFMImageVariogramAnalysis, value) {
                   AFMImageVariogramAnalysis@variogramSlopeAnalysis <- value
                   return(AFMImageVariogramAnalysis)
                 })

#' evaluateVariogramModels method to evaluate the basic variogram models
#'
#' evaluateVariogramModels method to evaluate the basic variogram models available in the \code{\link{gstat}} package
#' An \code{\link{AFMImageVariogramAnalysis}} method to handle the variogram analysis of an \code{\link{AFMImage}}.
#' The variogram models used can be seen with the show.vgms() function from the \code{\link{gstat}} package.
#'
#' @param AFMImageVariogramAnalysis an \code{\link{AFMImageVariogramAnalysis}}
#' @param AFMImage an \code{\link{AFMImage}}
#' @exportMethod evaluateVariogramModels
#' @rdname AFMImageVariogramAnalysis-evaluateVariogramModels-method
#' @examples
#' \dontrun{
#' library(AFM)
#'
#' data("AFMImageOfRegularPeaks")
#' # take an extract of the image to speed up the calculation
#' AFMImage<-extractAFMImage(AFMImageOfRegularPeaks, 40, 40, 32)
#' # e.g. AFMImage@@fullfilename<-"/users/ubuntu/AFMImageOfRegularPeaks-extract.txt"
#' AFMImage@@fullfilename<-paste(tempdir(), "AFMImageOfRegularPeaks-extract.txt", sep="/")
#'
#' AFMImageAnalyser<-AFMImageAnalyser(AFMImage)
#'
#' # Variogram analysis
#' sampleFitPercentage<-3.43/100
#' variogramAnalysis<-AFMImageVariogramAnalysis(sampleFitPercentage)
#' variogramAnalysis@@omnidirectionalVariogram<-
#'   AFM::calculateOmnidirectionalVariogram(AFMImage=AFMImage,
#'                                          AFMImageVariogramAnalysis=variogramAnalysis)
#' variogramAnalysis@@directionalVariograms<-
#'   AFM::calculateDirectionalVariograms(AFMImage=AFMImage,
#'                                       AFMImageVariogramAnalysis=variogramAnalysis)
#'
#' # manage model evaluations
#' AFMImageVariogram<-variogramAnalysis@@omnidirectionalVariogram
#' class(AFMImageVariogram)=c("gstatVariogram","data.frame")
#' variogramAnalysis<-evaluateVariogramModels(variogramAnalysis, AFMImage)
#'
#' mergedDT<-getDTModelEvaluation(variogramAnalysis)
#' mergedDT
#' sillRangeDT<-getDTModelSillRange(variogramAnalysis)
#' sillRangeDT
#' }
#'
setGeneric(name= "evaluateVariogramModels",
           def= function(AFMImageVariogramAnalysis, AFMImage) {
             return(standardGeneric("evaluateVariogramModels"))
           })

#' @name evaluateVariogramModels
#' @aliases evaluateVariogramModels evaluateVariogramModels,AFMImageVariogramAnalysis-method
#' @docType methods
#' @rdname AFMImageVariogramAnalysis-evaluateVariogramModels-method
#' @export
setMethod(f="evaluateVariogramModels", "AFMImageVariogramAnalysis",
          definition= function(AFMImageVariogramAnalysis, AFMImage) {
            AFMImageVariogram<-AFMImageVariogramAnalysis@omnidirectionalVariogram
            AFMImagesampleFitPercentage<-AFMImageVariogramAnalysis@sampleFitPercentage
            class(AFMImageVariogram)=c("gstatVariogram","data.frame")
            filename<-basename(AFMImage@fullfilename)
            # get the cuts
            #print(typeof(getCutsOfSpplotFromAFMImage(AFMImage)))
            AFMImageVariogramAnalysis@cuts<-getCutsOfSpplotFromAFMImage(AFMImage)
            # 3.43% for 9000 points to model
            # use 512*512 - 9000 to validate
            #AFMImagesampleFitPercentage<- 3.43/100
            TheData<-as.data.frame(AFMImage@data)
            TheData=na.omit(TheData)
            # We randomly split the data into two parts.
# From the data, AFMImagesampleFitPercentage observations will be used for variogram modeling # and the rest will be used for prediction and evaluation totalSampleSize<-AFMImage@samplesperline*AFMImage@lines choose<-floor(totalSampleSize*AFMImagesampleFitPercentage) chosenFitSample<-sample(1:totalSampleSize, choose) AFMImageVariogramAnalysis@chosenFitSample<-chosenFitSample #print(paste("Kriging sample size", choose)) #print(paste("Validation sample size", choose)) part_model <- TheData[chosenFitSample, ] part_model2 <- TheData[chosenFitSample, ] coordinates(part_model) = ~x+y #proj4string(part_model)=CRS("+init") proj4string(part_model)=CRS() is.projected(part_model) part_valid <- TheData[-chosenFitSample, ] part_valid2 <- TheData[-chosenFitSample, ] part_valid <- TheData part_valid2 <- TheData coordinates(part_valid) = ~x+y #proj4string(part_valid)=CRS("+init") proj4string(part_valid)=CRS() is.projected(part_valid) # starting value to fit the models expectedSill<-calculateWavModelExpectedSill(AFMImageVariogram) expectedRange<-calculateWavModelExpectedRange(AFMImageVariogram, expectedSill) expectedNugget<-0 print(paste("expected sill for wav model is", expectedSill)) print(paste("expected range for wav model is", expectedRange)) # get all vgm models allVariogramModelEvaluation<-c() notUsedModels<-c("Nug","Int", "Err", "Lin", "Pow", "Leg", "Spl") notUsedModels<-c("Nug","Int", "Err", "Lin", "Pow", "Leg", "Spl") #"Nug","Exp","Sph","Gau","Exc","Mat","Ste","Cir","Lin","Bes","Pen","Per","Wav","Hol","Log","Pow","Spl","Leg","Err","Int" # TODO MB notUsedModels<-c("Nug","Exp","Sph","Gau","Exc","Mat","Ste","Cir","Lin","Bes","Per","Wav","Hol","Log","Pow","Spl","Leg","Err","Int") # for updateProgress counter<-0 totalCounter<-3*(length(vgm()$short)-length(notUsedModels)) for (testedModel in vgm()$short){ if (match(testedModel, notUsedModels, nomatch = FALSE)) { print(paste("the model", testedModel,"will not be used")) }else { variogramModelEvaluation <- new("AFMImageVariogramModel") #print(testedModel) modelName<-paste(testedModel) tryCatch({ psill<-expectedSill range<-expectedRange expectedNugget<-0 fitsills = c(FALSE, TRUE) if(testedModel=="Wav") { fitsills = c(TRUE, TRUE) } if(testedModel=="Exp") { psill<-expectedSill range<-expectedRange/2.2 } if(testedModel=="Bes") { psill<-expectedSill range<-expectedRange/4.4 } if (testedModel=="Ste") { psill<-expectedSill range<-expectedRange*0.6 } if (testedModel=="Gau") { psill<-5 range<-1 } if (testedModel=="Log") { psill<-5 range<-1 } if (testedModel=="Pow") { psill<-5 range<-1 } if (testedModel=="Exc") { psill<-5 range<-1 expectedNugget<-expectedSill fitsills = c(TRUE, TRUE) } if (testedModel=="Hol") { psill<-expectedSill range<-expectedRange/2 expectedNugget<-0 } if (testedModel=="Per") { psill<-expectedSill/2 range<-expectedRange expectedNugget<-expectedSill fitsills = c(TRUE, TRUE) } if (testedModel=="Leg") { psill<-expectedSill/2 range<-expectedRange } # fit model to experimental variogram #vgm<-vgm(psill= psill,model=testedModel,range= range,nugget=0) print(paste("Fit variogram for model", testedModel, "...")) print(paste("expected sill ", psill)) print(paste("expected range ", range)) print(paste("expected nugget ", expectedNugget)) vgm<-vgm(psill= psill,model=testedModel,range= range,nugget=expectedNugget) #TODO print("1") if (!is.null(AFMImageVariogramAnalysis@updateProgress)) { if (is.function(AFMImageVariogramAnalysis@updateProgress)&& !is.null(AFMImageVariogramAnalysis@updateProgress())) { counter<-counter+1 
AFMImageVariogramAnalysis@updateProgress(detail=paste0("fitting ",modelName, " model"), value=counter/totalCounter) } } print("2") fit.v <- fit.variogram(AFMImageVariogram, vgm, fit.sills = fitsills, warn.if.neg=TRUE) #print(fit.v$psill) if (as.numeric(fit.v$psill[2]) > 0) { # krigging on a sample #print("Kriging...") if (!is.null(AFMImageVariogramAnalysis@updateProgress)) { if (is.function(AFMImageVariogramAnalysis@updateProgress)&& !is.null(AFMImageVariogramAnalysis@updateProgress())) { counter<-counter+1 AFMImageVariogramAnalysis@updateProgress(detail=paste0("krigging ",modelName, " model"), value=counter/totalCounter) } } mykrige<-mykrigefunction(fit.v, part_model, part_valid) # Evaluate quality of krigging print("Evaluate quality...") if (!is.null(AFMImageVariogramAnalysis@updateProgress)) { if (is.function(AFMImageVariogramAnalysis@updateProgress)&& !is.null(AFMImageVariogramAnalysis@updateProgress())) { counter<-counter+1 AFMImageVariogramAnalysis@updateProgress(detail=paste0("evaluating ",modelName, " model"), value=counter/totalCounter) } } myStatsFromKrige<-statsFromKrige(filename, vgm, part_valid,mykrige) res<-data.table(myStatsFromKrige) print(res$cor) if (!is.na(res$cor)){ variogramModelEvaluation@model<-testedModel [email protected]<-data.table(fit.v) variogramModelEvaluation@mykrige<-mykrige variogramModelEvaluation@res<-data.table(myStatsFromKrige) allVariogramModelEvaluation<-c(allVariogramModelEvaluation, variogramModelEvaluation) print("done") }else{ print(paste("correlation not calculated for model", testedModel, sep=" ")) } }else{ print(paste("sill is negative for model", testedModel, sep=" ")) } }, warning=function(w) { message(paste("warning with", testedModel,w)) counter<-counter+1 } ,error=function(e) message(paste("Pb2 with", testedModel,e)) ) } } if(!is.null(allVariogramModelEvaluation)) AFMImageVariogramAnalysis@variogramModels<-allVariogramModelEvaluation # for each model, evaluate it return(AFMImageVariogramAnalysis) }) #' @title updateProgress #' @description is a function used by a GUI such as shiny GUI #' @name updateProgress #' @aliases updateProgress updateProgress,AFMImageVariogramAnalysis-method #' @docType methods #' @param AFMImageVariogramAnalysis an \code{\link{AFMImageVariogramAnalysis}} #' @param value shiny progress bar value #' @param detail shiny progress bar detail #' @param message shiny progress bar message #' @rdname AFMImageVariogramAnalysis-updateProgress-method #' @export setGeneric(name= "updateProgress", def= function(AFMImageVariogramAnalysis, value, detail, message) { return(standardGeneric("updateProgress")) }) calculateWavModelExpectedSill<-function(omnidirectionalVariogram) { return(floor(mean(tail(omnidirectionalVariogram$gamma, floor(length(omnidirectionalVariogram$gamma)/2))))) } calculateWavModelExpectedRange<-function(omnidirectionalVariogram, expectedSill) { # first intersection (lowest distance) between expectedSill and the omnidirectionalVariogram indexDistMin<-head(which(omnidirectionalVariogram$gamma > expectedSill),n=1) if (indexDistMin>0) { expectedRange<-(omnidirectionalVariogram$dist[indexDistMin]) # find the fist peak thresh = 0 x<-tail(omnidirectionalVariogram$gamma, n= length(omnidirectionalVariogram$gamma)-indexDistMin) pks <- which(diff(sign(diff(x, na.pad = FALSE)), na.pad = FALSE) < 0) + 2 if (!missing(thresh)) { pks[x[pks - 1] - x[pks] > thresh] } else pks if (length(pks)>0) { #print(paste("pks",pks)) expectedRange2<-(omnidirectionalVariogram$dist[pks[1]+indexDistMin]) print(expectedRange) 
print(expectedRange2) expectedRange<-(expectedRange+ expectedRange2)/2 } } else expectedRange<-1 return(floor(expectedRange)) } #' calculate a width to be used for experimental variogram calculation #' #' calculate a width to be used for experimental variogram calculation in order to generate a line #' instead of a cloud of points. If the chosen width is too small, the experimental variogram will #' be a cloud of points instead of a line. #' #' \code{getAutomaticWidthForVariogramCalculation} returns the width to be used for variogram calculation #' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy #' @return the smallest width to be used for variogram calculation #' @author M.Beauvais #' @rdname AFMImageVariogramAnalyser-getAutomaticWidthForVariogramCalculation #' @export #' @examples #' \dontrun{ #' library(AFM) #' #' data(AFMImageOfAluminiumInterface) #' print(getAutomaticWidthForVariogramCalculation(AFMImageOfAluminiumInterface)) #' } #' getAutomaticWidthForVariogramCalculation<-function(AFMImage){ return(ceiling(max(AFMImage@hscansize/AFMImage@samplesperline, AFMImage@vscansize/AFMImage@lines))) } #' Calculate experimental omnidirectional semi-variogram #' #' \code{calculateOmnidirectionalVariogram} returns the semivariance calculated for all the directions #' calculate the experimental omnidirectional variogram of an \code{\link{AFMImage}} with the \code{\link[gstat]{variogram}} function of the gstat package. #' The experimental semi-variogram is used to fit (find the best sill and range) the theoretical variogram models. #' With 512*512 images, it takes several minutes to calculate. #' #' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy #' @param AFMImageVariogramAnalysis an \code{\link{AFMImageVariogramAnalysis}} to manage and store the result of variogram analysis #' @return the semivariance calculated in all the directions #' @rdname AFMImageVariogramAnalyser-calculateOmnidirectionalVariogram #' @author M.Beauvais #' @export #' @examples #' \dontrun{ #' library(AFM) #' library(ggplot2) #' #' data(AFMImageOfRegularPeaks) #' variogramAnalysis<-AFMImageVariogramAnalysis(sampleFitPercentage=3.43/100) #' avario<-AFM::calculateOmnidirectionalVariogram(AFMImageVariogramAnalysis= variogramAnalysis, #' AFMImage= AFMImageOfRegularPeaks) #' dist<-gamma<-NULL #' p <- ggplot(avario, aes(x=dist, y=gamma)) #' p <- p + geom_point() #' p <- p + geom_line() #' p <- p + ylab("semivariance") #' p <- p + xlab("distance (nm)") #' p <- p + ggtitle("Experimental semivariogram") #' p #' } calculateOmnidirectionalVariogram<- function(AFMImageVariogramAnalysis, AFMImage) { if (is.null(AFMImageVariogramAnalysis@width)||(AFMImageVariogramAnalysis@width==0)) { AFMImageVariogramAnalysis@width=getAutomaticWidthForVariogramCalculation(AFMImage) print(paste("using automatic width of", AFMImageVariogramAnalysis@width)) } width<-AFMImageVariogramAnalysis@width print(paste("calculating omnidirectional variogram using width of", width,"...")) data<-AFMImage@data x<-y<-NULL setkey(data,x,y) coordinates(data) = ~x+y #proj4string(data)=sp::CRS("+no_defs") proj4string(data)=CRS() is.projected(data) return(data.table(gstat::variogram(data$h ~ x+y , data, width=width))) } #' Calculate experimental directional semi-variograms #' #' calculate four experimental directional variograms of an \code{\link{AFMImage}} with the \code{\link[gstat]{variogram}} function of the gstat package. #' The directional semi-variogram can be used to check the isotropy of the sample. 
#' Note: The sample will be isotropic if the slopes of the four variograms are similar. #' #' \code{calculateDirectionalVariograms} returns the directional variograms #' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy #' @param AFMImageVariogramAnalysis an \code{\link{AFMImageVariogramAnalysis}} to manage and store the result of variogram analysis #' @return Four directional variograms #' @rdname AFMImageVariogramAnalyser-calculateDirectionalVariograms #' @author M.Beauvais #' @export #' @examples #' \dontrun{ #' library(AFM) #' library(ggplot2) #' #' data(AFMImageOfRegularPeaks) #' variogramAnalysis<-AFMImageVariogramAnalysis(sampleFitPercentage=3.43/100) #' varios<-AFM::calculateDirectionalVariograms(AFMImage= AFMImageOfRegularPeaks, #' AFMImageVariogramAnalysis= variogramAnalysis) #' dist<-gamma<-NULL #' p <- ggplot(varios, aes(x=dist, y=gamma, #' color= as.factor(dir.hor), #' shape=as.factor(dir.hor))) #' p <- p + expand_limits(y = 0) #' p <- p + geom_point() #' p <- p + geom_line() #' p <- p + ylab("semivariance (nm^2)") #' p <- p + xlab("distance (nm)") #' p <- p + ggtitle("Directional") #' p #' } calculateDirectionalVariograms<- function(AFMImageVariogramAnalysis,AFMImage) { print("calculating directional variograms...") if (is.null(AFMImageVariogramAnalysis@width)||(AFMImageVariogramAnalysis@width==0)) { AFMImageVariogramAnalysis@width<-getAutomaticWidthForVariogramCalculation(AFMImage) print(paste("using automatic width of", AFMImageVariogramAnalysis@width)) } width<-AFMImageVariogramAnalysis@width x<-y<-NULL data<-AFMImage@data setkey(data,x,y) coordinates(data) = ~x+y #proj4string(data)=CRS("+init") proj4string(data)=CRS() is.projected(data) return(data.table(gstat::variogram(data$h ~ x+y, data, width=width, alpha = c(0, 45, 90, 135)))) } mykrigefunction<-function(fit.v, part_model, part_valid) { #The predictions will be performed on the other spatial locations than the part valid data set krige(h ~ x+y, part_model, part_valid, model = fit.v) } statsFromKrige<-function(name, vgm, part_valid, part_valid_pr) { modelName<-paste(vgm$model, collapse=", ") #Compute the difference between the predicted values and the true values: difference <- part_valid$h - part_valid_pr$var1.pred #summary(difference) #Compute the prediction sum of squares PRESS <- sum(difference^2) data.frame(name= name ,model=modelName, cor= cor(part_valid_pr$var1.pred,part_valid$h), press=PRESS) } #' Calculate slopes and intersections in variogram #' \code{getAutoIntersectionForOmnidirectionalVariogram} returns the slope in the omnidirectional variograms #' @param AFMImageAnalyser an \code{\link{AFMImageAnalyser}} #' @return an \code{\link{omniVariogramSlopeAnalysis}} #' @author M.Beauvais #' @export getAutoIntersectionForOmnidirectionalVariogram<-function(AFMImageAnalyser){ data<-AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram data<-data.table(dist=log10(data$dist), gamma=log10(data$gamma)) #data$dist<-as.numeric(data$dist) aval<-max(data$dist) index<-which(data$dist<= aval)[1] lengthData<-length(data$dist)-index print(paste("lengthData=",lengthData)) minimumR <- function(data, space, x, y) { lengthData<-nrow(data) aorigin<-0 #borigin <- data[lengthData]$gamma borigin <- max(data$gamma) #print(borigin) finalres2=c() finalres = c(Inf,0,0) for (i in seq(1, length(x))) { x1=x[i] for (j in seq(1, length(y))) { if (abs(j-i)>=space) { x2=y[j] if ((x1<1)||(x2<1)||(x1>lengthData)||(x2>lengthData)||(x1==x2)) { inter <- data[1]$dist } else{ if (x1<x2) { myx=data[seq(x1,x2)]$dist 
            myy=data[seq(x1,x2)]$gamma
          }
          if (x1>x2) {
            myx=data[seq(x2,x1)]$dist
            myy=data[seq(x2,x1)]$gamma
          }
          res <- lm(myy~myx)
          b<-unname(res$coefficients[1])
          a<-unname(res$coefficients[2])
          inter <- (borigin-b)/a
          if ((inter<finalres[1])&(inter>0)) {
            finalres=c(inter, x1, x2, borigin,a,b)
            print(finalres)
          }
        }
      }
      #print(paste(x1, x2))
    }
    #finalres2=c(finalres2, inter)
  }
  omniVariogramSlopeAnalysis = new("omniVariogramSlopeAnalysis")
  omniVariogramSlopeAnalysis@intersection_sill=finalres[1]
  omniVariogramSlopeAnalysis@tangente_point1=finalres[2]
  omniVariogramSlopeAnalysis@tangente_point2=finalres[3]
  omniVariogramSlopeAnalysis@sill=finalres[4]
  omniVariogramSlopeAnalysis@slope=finalres[5]
  omniVariogramSlopeAnalysis@yintersept=finalres[6]
  #print(omniVariogramSlopeAnalysis)
  return(omniVariogramSlopeAnalysis)
  }

  lengthData<-nrow(data)
  #newMax=ceiling(data[c(lengthData),]$dist/20)
  newMax=2
  # if (second_slope==FALSE) {
  aby<-1
  print(aby)
  x <- seq(1, newMax,by=aby)
  print(x)
  z <- minimumR(data, space= 1, x,x)
  # } else {
  #   aby<-1
  #   print(aby)
  #   space=ceiling(lengthData/4)
  #   print(space)
  #   x <- seq(newMax,lengthData, by=aby)
  #   z <- minimumR(data, space= space, x,x)
  # }
  return(z)
}

#' Get the graph of the log-log omnidirectional variogram
#' \code{getLogLogOmnidirectionalSlopeGraph} returns the graph of the log-log omnidirectional variogram
#' @param AFMImageAnalyser an \code{\link{AFMImageAnalyser}}
#' @param withFratcalSlope a boolean to indicate if the graph should contain a line representing the slope for the calculation of the fractal index and topothesy
#' @return a ggplot2 graph
#' @author M.Beauvais
#' @export
#' @examples
#' \dontrun{
#' library(AFM)
#' library(ggplot2)
#'
#' data(AFMImageOfRegularPeaks)
#'
#' AFMImageAnalyser = new("AFMImageAnalyser",
#'                        fullfilename="/home/ubuntu/AFMImageOfRegularPeaks-Analyser.txt")
#' variogramAnalysis<-AFMImageVariogramAnalysis(sampleFitPercentage=3.43/100)
#' AFMImageAnalyser@@variogramAnalysis<-variogramAnalysis
#' AFMImageAnalyser@@variogramAnalysis@@omnidirectionalVariogram<-
#'   calculateOmnidirectionalVariogram(AFMImage= AFMImageOfRegularPeaks,
#'                                     AFMImageVariogramAnalysis= variogramAnalysis)
#' p<-getLogLogOmnidirectionalSlopeGraph(AFMImageAnalyser, withFratcalSlope=TRUE)
#' p
#' }
getLogLogOmnidirectionalSlopeGraph<-function(AFMImageAnalyser, withFratcalSlope=FALSE) {
  tryCatch({
    omniVariogramSlopeAnalysis=getAutoIntersectionForOmnidirectionalVariogram(AFMImageAnalyser)
    # resVarioDT <- data.table(
    #   name=basename(AFMImage@fullfilename),
    #   variogram_slope=omniVariogramSlopeAnalysis@slope
    # )
    # if (!exists("resVario")) {
    #   resVario<-copy(resVarioDT)
    # }else{
    #   resVario=rbind(resVario,resVarioDT)
    # }
    # print(resVario)
    #
    # # AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram
    # # AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram[5,]
    # # AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram[7,]

    dist<-gamma<-id<-NULL
    myvgm<-AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram
    myvgm<-data.table(dist=log10(myvgm$dist), gamma=log10(myvgm$gamma))
    p1<-ggplot(myvgm, aes(x = dist, y = gamma)) + geom_point()
    p1 <- p1 + ylab("log semivariance")
    p1 <- p1 + xlab("log lag distance (nm)")
    if (withFratcalSlope) {
      p1 <- p1 + geom_abline(intercept = omniVariogramSlopeAnalysis@yintersept,
                             slope = omniVariogramSlopeAnalysis@slope)
    }
    # the title uses the analyser's filename; no AFMImage object exists in this scope
    p1 <- p1 + ggtitle(paste0(basename(AFMImageAnalyser@fullfilename)," semivariance - slope=", omniVariogramSlopeAnalysis@slope ))
    p1 <- p1 +
expand_limits(y = 0) p1 <- p1 + guides(colour=FALSE) return(p1) # png(file = paste0(exportDirectory,"/",basename(AFMImage@fullfilename),"-variograms.png"), bg = "transparent", width = 1024, height = 768) # print(p1) # dev.off() }, error = function(e) {print(paste("Impossible to find variograms intersections automaticaly",e))}) } saveSpplotFromKrige<-function(fullfilename, modelName, part_valid_pr, cuts, withoutLegend) { if(missing(withoutLegend)) { withoutLegend=FALSE } expectedWidth = 400 expectHeight = 300 colLimit<-length(cuts)+3 cols <- getSpplotColors(colLimit) if (withoutLegend) { p<-spplot(part_valid_pr["var1.pred"], cuts=cuts, col.regions=cols,key=list(lines=FALSE, col="transparent")) }else{ p<-spplot(part_valid_pr["var1.pred"], cuts=cuts, col.regions=cols) } print(paste("saving", basename(fullfilename))) png(filename=fullfilename, units = "px", width=expectedWidth, height=expectHeight) print(p) dev.off() } getAFMImageFromKrige<-function(AFMImage, vgm, part_valid_pr) { predictedImageFilename<-paste(basename(AFMImage@fullfilename), vgm$model[2], "predicted",sep="-") predictedImageFullFilename<-paste(dirname(AFMImage@fullfilename), predictedImageFilename, sep="/") AFMImage(data = data.table(x = AFMImage@data$x, y = AFMImage@data$y, h = part_valid_pr["var1.pred"]@data), samplesperline = AFMImage@samplesperline, lines = AFMImage@lines, hscansize = AFMImage@hscansize, vscansize = AFMImage@vscansize, scansize = AFMImage@scansize, fullfilename = predictedImageFullFilename) } getSpplotColors<-function(colLimit) { blues9[3:colLimit] } getCutsOfSpplotFromAFMImage<-function(AFMImage) { initialAFMImage<-as.data.frame(AFMImage@data) coordinates(initialAFMImage) = ~x+y #proj4string(initialAFMImage)=CRS("+init") proj4string(initialAFMImage)=CRS() is.projected(initialAFMImage) p<-spplot(initialAFMImage["h"]) mystr<-unlist(strsplit( as.character(p$legend$bottom$args$key$text)," ")) mystr<-unlist(strsplit(mystr,",")) cuts<-as.double(unlist(strsplit(mystr, "[^[:digit:]^.^-]"))) cuts <- unique(cuts[!is.na(cuts)]) return(cuts) } #' Save on disk an AFMImage as a Lattice (trellis) plot #' #' save a Lattice (trellis) plot of an \code{\link{AFMImage}} using the \code{\link[sp]{spplot}} method of the sp package. #' This function is used to evaluate visually the quality of the predicted surface when a variogram model is used. #' #' \code{saveSpplotFromAFMImage} save a a Lattice (trellis) plot of an \code{\link{AFMImage}} on disk #' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy #' @param fullfilename directory and filename to save to png #' @param expectedWidth (optional) expected width of the saved image. Default is 400px. #' @param expectHeight (optional) expected height of the saved image. Default is 300px. #' @param withoutLegend (optional) set at FALSE, the cuts legend will be included in the plot. Default is FALSE. 
#' @author M.Beauvais #' @rdname AFMVariogramAnalyser-saveSpplotFromAFMImage #' @export #' @examples #' \dontrun{ #' library(AFM) #' #' data(AFMImageOfAluminiumInterface) #' saveSpplotFromAFMImage(AFMImageOfAluminiumInterface, #' paste(tempdir(), "myFileWithoutLegend.png", sep="/"), 800,800, TRUE) #' saveSpplotFromAFMImage(AFMImageOfAluminiumInterface, #' paste(tempdir(), "myFileWithLegend.png", sep="/"), 800,800, FALSE) #' } saveSpplotFromAFMImage<-function(AFMImage, fullfilename, expectedWidth, expectHeight, withoutLegend) { if (missing(expectedWidth)) expectedWidth = 400 if (missing(expectHeight))expectHeight = 300 if(missing(withoutLegend))withoutLegend=FALSE p<-getSpplotFromAFMImage(AFMImage, expectedWidth, expectHeight, withoutLegend) png(filename=fullfilename, units = "px", width=expectedWidth, height=expectHeight) print(p) dev.off() } #' Get an AFMImage as a Lattice (trellis) plot #' #' get a Lattice (trellis) plot of an \code{\link{AFMImage}} using the \code{\link[sp]{spplot}} method of the sp package. #' This function is used to evaluate visually the quality of the predicted surface when a variogram model is used. #' #' \code{getSpplotFromAFMImage} get a Lattice (trellis) plot of an \code{\link{AFMImage}} on disk #' @param AFMImage an \code{\link{AFMImage}} from Atomic Force Microscopy #' @param expectedWidth (optional) expected width of the saved image. Default is 400px. #' @param expectHeight (optional) expected height of the saved image. Default is 300px. #' @param withoutLegend (optional) set at FALSE, the cuts legend will be included in the plot. Default is FALSE. #' @author M.Beauvais #' @rdname AFMVariogramAnalyser-getSpplotFromAFMImage #' @export #' @examples #' \dontrun{ #' library(AFM) #' #' data(AFMImageOfAluminiumInterface) #' p<-getSpplotFromAFMImage(AFMImageOfAluminiumInterface, 800,800, TRUE) #' print(p) #' } getSpplotFromAFMImage<-function(AFMImage, expectedWidth, expectHeight, withoutLegend) { if (missing(expectedWidth)) expectedWidth = 400 if (missing(expectHeight))expectHeight = 300 if(missing(withoutLegend))withoutLegend=FALSE initialAFMImage<-as.data.frame(AFMImage@data) coordinates(initialAFMImage) = ~x+y #proj4string(initialAFMImage)=CRS("+init") proj4string(initialAFMImage)=CRS() is.projected(initialAFMImage) cuts <- getCutsOfSpplotFromAFMImage(AFMImage) colLimit<-length(cuts)+3 cols <- getSpplotColors(colLimit) if (withoutLegend) { p<-spplot(initialAFMImage["h"], cuts=cuts, col.regions=cols,key=list(lines=FALSE, col="transparent")) }else{ p<-spplot(initialAFMImage["h"], cuts=cuts, col.regions=cols) } #initialAFMImage.lowres <- aggregate(initialAFMImage["h"], fact = 2, fun = mean) return(p) } getSpplotImagefullfilename<-function(exportDirectory, sampleName) { return(paste(exportDirectory, paste(sampleName,"-real.png",sep=""),sep="/")) } getDirectionalVarioCsvFullfilename<-function(exportDirectory, sampleName) { exportCsvFilename<-paste(sampleName,"-directional-variograms.csv", sep="") exportCsvFullFilename<-paste(exportDirectory, exportCsvFilename, sep="/") return(exportCsvFullFilename) } getDirectionalVarioPngFullfilename<-function(exportDirectory, sampleName) { directionalGraphName=paste(sampleName,"directional-variograms",sep="-") exportpng2FullFilename<-paste(exportDirectory, paste(directionalGraphName,"png",sep="."),sep="/") return(exportpng2FullFilename) } getOmnidirectionalVarioCsvFullfilename<-function(exportDirectory, sampleName) { exportCsvFilename<-paste(sampleName,"-omnidirectional-variograms.csv", sep="") 
exportCsvFullFilename<-paste(exportDirectory, exportCsvFilename, sep="/") return(exportCsvFullFilename) } getOmnidirectionalVarioPngFullfilename<-function(exportDirectory, sampleName) { omnidirectionalGraphName=paste(sampleName,"omnidirectional-variogram",sep="-") exportOpngFullFilename<-paste(exportDirectory, paste(omnidirectionalGraphName,"png",sep="."),sep="/") return(exportOpngFullFilename) } getVarioPngchosenFitSample<-function(exportDirectory, sampleName) { exportpngFilename<-paste(sampleName, "chosen-sample.png",sep="-") exportpngFullFilename<-paste(exportDirectory, exportpngFilename, sep="/") return(exportpngFullFilename) } getSpplotPredictedImageFullfilename<-function(exportDirectory, sampleName, modelName) { predictedfilename<-paste(sampleName,modelName,"predicted.png", sep="-") predictedfullfilename<-paste(exportDirectory, predictedfilename,sep="/") return(predictedfullfilename) }
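# A minimal isotropy-check sketch (illustration only, not part of the package
# API). Per the note on calculateDirectionalVariograms above, the sample is
# roughly isotropic when the slopes of the four directional variograms are
# similar; here each slope is estimated by ordinary least squares.
if (FALSE) {
  library(AFM)
  library(data.table)
  data(AFMImageOfRegularPeaks)
  variogramAnalysis <- AFMImageVariogramAnalysis(sampleFitPercentage = 3.43/100)
  varios <- calculateDirectionalVariograms(AFMImageVariogramAnalysis = variogramAnalysis,
                                           AFMImage = AFMImageOfRegularPeaks)
  # one least-squares slope per direction (0, 45, 90 and 135 degrees)
  slopes <- varios[, .(slope = unname(coef(lm(gamma ~ dist))[2])), by = dir.hor]
  print(slopes)
  # a small relative spread between the four slopes suggests isotropy
  print(diff(range(slopes$slope)) / mean(slopes$slope))
}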
# ---- end of file: R/AFMVariogramAnalyser.R ----
#' Atomic Force Microscopy images tools
#'
#' The AFM package provides statistics analysis tools for Atomic Force Microscopy image analysis.\cr
#' Licence: Affero GPL v3
#'
#' A graphical user interface is available by using the \code{\link{runAFMApp}} command.
#'
#' Several high level functions are:
#' \itemize{
#'   \item create your AFM image from a list of measured heights (see the example section of \code{\link{AFMImage}})
#'   \item import your image from the Nanoscope Analysis (TM) tool (\code{\link{importFromNanoscope}})
#'   \item check if your sample is normally distributed and isotropic and get a pdf report (\code{\link{generateCheckReport}})
#'   \item calculate the Gaussian mixes of the heights (\code{\link{performGaussianMixCalculation}})
#'   \item perform variance (variogram), roughness against lengthscale and fractal analysis and get a pdf report (\code{\link{generateReport}})
#'   \item identify 2D networks (\code{\link{getNetworkParameters}})
#' }
#'
#' Other functions are:
#' \itemize{
#'   \item check the sample for normality (\code{\link{checkNormality}}) and for isotropy (\code{\link{checkIsotropy}})
#'   \item calculate total RMS roughness: quick calculation of the total root mean square roughness (\code{\link{totalRMSRoughness}})
#'   \item calculate the omnidirectional variogram: calculate the estimated variogram (\code{\link{calculateOmnidirectionalVariogram}})
#'   \item calculate roughness against lengthscale and Power Spectrum Density (PSD): calculate roughness against length scale (\code{\link{RoughnessByLengthScale}}), PSD 1D (\code{\link{PSD1DAgainstFrequency}}) or PSD 2D (\code{\link{PSD2DAgainstFrequency}}) against frequencies
#'   \item calculate fractal dimension and scale: use the \code{\link{getFractalDimensions}} function
#'   \item 3D print your AFM image (\code{\link{exportToSTL}})
#' }
#'
#' An EC2 instance is available for basic testing at the following address: \url{http://www.afmist.org}
#'
#' Note: to use with a Bruker (TM) Atomic Force Microscope, use the Nanoscope Analysis (TM) software and
#' \itemize{
#'   \item use the "Flatten" function
#'   \item save the flattened image
#'   \item in the "Browse Data Files" window, right-click on the image name and export the AFM image with its headers through the "Export > ASCII" contextual menu option
#' }
#'
#' @examples
#' \dontrun{
#' library(AFM)
#' # Analyse the AFMImageOfRegularPeaks AFM image from this package
#' data("AFMImageOfRegularPeaks")
#' AFMImage<-AFMImageOfRegularPeaks
#' # exportDirectory="C:/Users/my_windows_login" or exportDirectory="/home/ubuntu"
#' exportDirectory=tempdir()
#' AFMImage@@fullfilename<-paste(exportDirectory,"AFMImageOfRegularPeaks.txt",sep="/")
#'
#' # Start by checking whether your sample is normally distributed and isotropic.
#' generateCheckReport(AFMImage)
#'
#' # If the sample is normally distributed and isotropic, generate a full report
#' generateReport(AFMImage)
#' }
#' @references
#' Gneiting2012, Tilmann Gneiting, Hana Sevcikova and Donald B. Percival 'Estimators of Fractal Dimension: Assessing the Roughness of Time Series and Spatial Data - Statistics in statistical Science, 2012, Vol. 27, No. 2, 247-277' \cr\cr
#' Olea2006, Ricardo A. Olea "A six-step practical approach to semivariogram modeling", 2006, "Stochastic Environmental Research and Risk Assessment, Volume 20, Issue 5, pp 307-318" \cr\cr
#' Sidick2009, Erkin Sidick "Power Spectral Density Specification and Analysis of Large Optical Surfaces", 2009, "Modeling Aspects in Optical Metrology II, Proc. of SPIE Vol.
7390 73900L-1" #' @seealso \code{\link{gstat}}, \code{\link{fractaldim}}, \code{\link{rgl}} #' @author M.Beauvais, J.Landoulsi, I.Liascukiene #' @docType package #' @name AFM #' @import data.table #' @import ggplot2 #' @import gstat #' @import igraph #' @import methods #' @import png #' @import plyr #' @import scales #' @importFrom graphics lines #' @importFrom grDevices chull #' @importFrom dbscan dbscan #' @importFrom stats complete.cases ks.test pnorm #' @importFrom parallel clusterEvalQ clusterExport parLapply #' @importFrom sp SpatialPoints SpatialPolygons Polygon Polygons SpatialPointsDataFrame spDistsN1 coordinates coordinates<- CRS is.projected proj4string proj4string<- spplot surfaceArea over #' @importFrom fractaldim fd.estim.filter1 fd.estim.isotropic fd.estim.squareincr fd.estim.transect.incr1 fd.estim.transect.var #' @importFrom graphics plot #' @importFrom grid grid.layout grid.newpage grid.text pushViewport viewport gpar grid.raster #' @importFrom gridExtra tableGrob ttheme_default #' @importFrom grDevices blues9 dev.off heat.colors pdf png #' @importFrom mixtools normalmixEM #' @importFrom moments skewness #' @importFrom pracma ceil meshgrid #' @importFrom rgl clear3d renderRglwidget rglwidget sceneChange registerSceneChange rglwidgetOutput rgl.close rgl.cur rgl.set rgl.clear rgl.bg rgl.bbox rgl.light rgl.surface rgl.viewpoint rgl.snapshot rgl.open par3d polygon3d rotate3d translate3d shade3d terrain3d writeSTL #' @importFrom shiny actionButton downloadButton downloadHandler fileInput h3 hr htmlOutput HTML imageOutput isolate mainPanel navbarMenu navbarPage observeEvent plotOutput radioButtons reactive reactiveValues renderImage renderPlot renderTable renderUI shinyServer shinyUI sidebarLayout sidebarPanel sliderInput tableOutput tabPanel updateSliderInput uiOutput #' @importFrom shinyjs disable enable useShinyjs #' @importFrom stats coefficients cor dist dnorm lm na.omit sd var #' @importFrom stringr str_sub str_replace_all #' @importFrom utils combn head installed.packages read.table tail write.table packageVersion NULL
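# A minimal quick-start sketch mirroring the high-level workflow documented
# above (illustration only; the function signatures are assumed from the
# links in the package documentation).
if (FALSE) {
  library(AFM)
  data(AFMImageOfRegularPeaks)
  # total root mean square roughness of the image (in nm)
  print(totalRMSRoughness(AFMImageOfRegularPeaks))
  # fractal dimension estimates computed with the fractaldim estimators
  print(getFractalDimensions(AFMImageOfRegularPeaks))
}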
# ---- end of file: R/pkgname.R ----
#' Launch the AFM shiny application
#'
#' Launch the AFM shiny graphical interface to access most of the functionalities of the AFM library.
#'
#' @export
#' @author M.Beauvais
#' @examples
#' \dontrun{
#' install.packages("AFM")
#' AFM::runAFMApp()
#' }
runAFMApp <- function() {
  appDir <- system.file("shiny", "AFM-desktop", package = "AFM")
  if (appDir == "") {
    stop("Could not find directory. Try re-installing `AFM`.", call. = FALSE)
  }
  shiny::runApp(appDir, display.mode = "normal")
}
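# A minimal sketch (an assumption, not an exported helper) for launching the
# app on a headless server: rgl must render off-screen there, which is the
# same rgl.useNULL handling the bundled shiny server applies itself.
if (FALSE) {
  options(rgl.useNULL = TRUE)  # do not open an on-screen rgl device
  AFM::runAFMApp()
}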
# ---- end of file: R/runAFMApp.R ----
require(shiny) library(AFM) library(rgl) library(tools) library(data.table) library(xtable) library(ggplot2) library(plyr) library(scales) library(fractaldim) library(stringr) library(grid) library(gridExtra) library(gstat) library(parallel) options(java.parameters = "-Xmx2000m") library(xlsx) data("AFMImageOfAluminiumInterface") data("AFMImageCollagenNetwork") data("AFMImageOfNormallyDistributedHeights") data("AFMImageOfOnePeak") data("AFMImageOfRegularPeaks") # By default, the file size limit is 5MB. It can be changed by # setting this option. Here we'll raise limit to 900MB. options(shiny.maxRequestSize = 900*1024^2) #HEADLESS<-TRUE HEADLESS<-TRUE AFMImageOfNormallyDistributedHeights@data$h<-AFMImageOfNormallyDistributedHeights@data$h-mean(AFMImageOfNormallyDistributedHeights@data$h) AFMImageOfOnePeak@data$h<-AFMImageOfOnePeak@data$h-mean(AFMImageOfOnePeak@data$h) AFMImageOfRegularPeaks@data$h<-AFMImageOfRegularPeaks@data$h-mean(AFMImageOfRegularPeaks@data$h) #AFMImageCollagenNetwork<-extractAFMImage(AFMImageCollagenNetwork,30,15,96) testHEADLESS<-function() { # result = tryCatch({ # open3d() # }, warning = function(w) { # print("warning rgl.open()") # HEADLESS<-TRUE # }, error = function(e) { # print("error rgl.open()") # HEADLESS<-TRUE # }, finally = { # if (!HEADLESS) rgl.close() # print("finally rgl.open()") # }) HEADLESS<-TRUE if (interactive()) HEADLESS<-FALSE } #testHEADLESS() #HEADLESS<-TRUE print(paste("HEADLESS is", HEADLESS)) print(paste("interactive() is", interactive())) #Do not open rgl windows with headless shiny server if (HEADLESS) { options(rgl.useNULL = TRUE) devicesOpenedLength<-length(as.integer(rgl.dev.list())) while(devicesOpenedLength >0) { rgl.close() devicesOpenedLength<-devicesOpenedLength-1 } #rgl.open() }else{ options(rgl.useNULL = FALSE) } afm_data_sets<-c("AFMImageOfAluminiumInterface","AFMImageCollagenNetwork","AFMImageOfNormallyDistributedHeights","AFMImageOfOnePeak","AFMImageOfRegularPeaks") BrowserCanvasPixelLimit<-128 shinyServer(function(input, output, session) { shinyjs::disable("export3DModel3DButton") if (HEADLESS) { open3d() dev <- rgl.cur() save <- options(rgl.inShiny = TRUE) on.exit(options(save)) session$onSessionEnded(function() { rgl.set(dev) rgl.close() }) shinyjs::disable("snapshot3DButton") shinyjs::disable("displayIn3DFileButton") session$sendCustomMessage("sceneChange", sceneChange("thewidget", skipRedraw = FALSE)) session$onFlushed(function() session$sendCustomMessage("sceneChange", sceneChange("thewidget", skipRedraw = FALSE))) } v <- reactiveValues( AFMImageAnalyser = NULL) clearData<-function() { disableButtons() v$AFMImageAnalyser<-NULL } disableButtons<-function() { shinyjs::disable("saveRdataFileButton") shinyjs::disable("calculateGaussianMixButton") shinyjs::disable("downloadGaussianMixSummaryButton") shinyjs::disable("downloadGaussianMixCDFCheckButton") shinyjs::disable("downloadGaussianMixDensityCheckButton") shinyjs::disable("downloadGaussianMixHeightsButton") shinyjs::disable("downloadGaussianMixCountsCheckButton") shinyjs::disable("RoughnessByLengthScaleButton") shinyjs::disable("downloadPSDPSDButton") shinyjs::disable("downloadRoughnessVsLengthscalePSDButton") shinyjs::disable("checkNormalityIsotropyCheckButton") shinyjs::disable("fitVariogramVarianceModelsButton") shinyjs::disable("calculateFractalDimensionsButton") shinyjs::disable("calculateNetworksNetworksButton") shinyjs::disable("displayIn3D3DButton") shinyjs::disable("snapshot3DButton") shinyjs::disable("calculate3DModel3DButton") 
shinyjs::disable("export3DModel3DButton") shinyjs::disable("generateCheckReport") shinyjs::disable("generateReport") } enableButtons<-function() { shinyjs::enable("saveRdataFileButton") shinyjs::enable("calculateGaussianMixButton") shinyjs::enable("downloadGaussianMixSummaryButton") shinyjs::enable("downloadGaussianMixCDFCheckButton") shinyjs::enable("downloadGaussianMixDensityCheckButton") shinyjs::enable("downloadGaussianMixHeightsButton") shinyjs::enable("downloadGaussianMixCountsCheckButton") shinyjs::enable("RoughnessByLengthScaleButton") shinyjs::enable("downloadPSDPSDButton") shinyjs::enable("downloadRoughnessVsLengthscalePSDButton") shinyjs::enable("checkNormalityIsotropyCheckButton") shinyjs::enable("fitVariogramVarianceModelsButton") shinyjs::enable("calculateFractalDimensionsButton") shinyjs::enable("calculateNetworksNetworksButton") shinyjs::enable("displayIn3D3DButton") if (!HEADLESS) shinyjs::enable("snapshot3DButton") shinyjs::enable("calculate3DModel3DButton") #shinyjs::enable("export3DModel3DButton") #shinyjs::enable("generateCheckReport") #shinyjs::enable("generateReport") } displayImageName<-function() { if (is.null(v$AFMImageAnalyser)) { print("v$AFMImageAnalyser null") return(NULL) } return(renderUI(HTML(c(paste0("<h4>Image</h4>",basename(v$AFMImageAnalyser@AFMImage@fullfilename), sep=""))))) } # # Import/Export Data # output$choose_inputtype <- renderUI({ radioButtons("inputtype", "From", as.list(c("file", "dataset")), inline=TRUE) }) # Check boxes output$choose_type <- renderUI({ # If missing input, return to avoid error later in function if(is.null(input$inputtype)) return() clearData() if (input$inputtype=="file") { fileInput('file1', 'Load', accept=c('.txt','.rdata','.rda')) }else{ radioButtons("choose_dataset", "Image", as.list(afm_data_sets)) } }) observeEvent(input$file1, { inFile <- input$file1 clearData() enableButtons() if (file_ext(inFile$name)=="txt") { AFMImage<-importFromNanoscope(inFile$datapath) v$AFMImageAnalyser<-AFMImageAnalyser(copy(AFMImage)) v$AFMImageAnalyser@AFMImage@fullfilename<-inFile$name #v$localfullfilename <-inFile$name }else{ #print(inFile) x<-load(file= inFile$datapath) isolate({ v$AFMImageAnalyser<-get(x) print(v$AFMImageAnalyser@fullfilename) #v$localfullfilename<-v$AFMImageAnalyser@fullfilename #v$AFMImageAnalyser@AFMImage<-v$AFMImageAnalyser@AFMImage if (!is.null(v$AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis1)) { updateSliderInput(session, "firstSlopeSliderPSD", value = c(v$AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis1@tangente_point1, v$AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis1@tangente_point2)) updateSliderInput(session, "lcSliderPSD", value = c(v$AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis2@tangente_point1, v$AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis2@tangente_point2)) } }) rm(x) } print("file1 button pushed") }) observeEvent(input$choose_dataset, { inFile <- input$choose_dataset #print(inFile) if(is.null(inFile)) return() clearData() enableButtons() AFMImage<-get(inFile) v$AFMImageAnalyser<-AFMImageAnalyser(copy(AFMImage)) #v$localfullfilename <-v$AFMImageAnalyser@AFMImage@fullfilename #print(v$localfullfilename) print("choose_dataset button pushed") }) output$displayIn3DFileButton <- renderUI({ if (HEADLESS == FALSE) { actionButton('displayIn3DFileButton', label = 'Display 3D model') } }) observeEvent(input$displayIn3DFileButton, { if (is.null(input$displayIn3DFileButton)) return(NULL) #print(input$displayIn3DFileButton) if (input$displayIn3DFileButton==c(0)) 
return(NULL) #print(input$displayIn3DFileButton) if(is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@AFMImage)) return(NULL) displayIn3D(v$AFMImageAnalyser@AFMImage, 1024, noLight=FALSE) print("displayIn3DFileButton button pushed") }) output$saveRdataFileButton <- downloadHandler( filename = function() { paste(basename(v$AFMImageAnalyser@AFMImage@fullfilename), '-AFMImageAnalyser','.Rda', sep='') }, content = function(file) { if(!is.null(v$AFMImageAnalyser)) { print("Exporting calculation") AFMImageAnalyser=copy(v$AFMImageAnalyser) save(AFMImageAnalyser, file= file) print("done") } } ) # # Gaussian Mix tab observer # # output$calculateGaussianMixButton <- renderUI({ # # If missing input, return to avoid error later in function # # if(is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@mixAnalysis)) # # return(NULL) # downloadButton('calculateGaussianMix',label='Calculate Gaussian Mix') # }) # # output$calculateGaussianMix <- downloadHandler( # filename = function() { paste(basename(v$AFMImageAnalyser@AFMImage@fullfilename), '.csv', sep='') }, # content = function(file) { # write.csv(v$AFMImageAnalyser@psdAnalysis@psd1d, file, row.names = FALSE) # } # ) observeEvent(input$calculateGaussianMixButton, { input$calculateGaussianMixButton print("calculateGaussianMixButton button pushed") if (is.null(input$calculateGaussianMixButton)) { print("input$calculateGaussianMixButton==NULL") return(NULL) } print("input$calculateGaussianMixButton!=NULL") if(input$calculateGaussianMixButton == c(0)) { print("input$calculateGaussianMixButton==0") return() }else{ isolate({ input$calculateGaussianMixButton # Create a Progress object progressGaussianMix <- shiny::Progress$new() # Close the progress when this reactive exits (even if there's an error) on.exit(progressGaussianMix$close()) print("calculation of Gaussian Mix") #createAFMImageAnalyser() mepsilon=input$mepsilonGaussianMix min=input$minmaxGaussianMix[1] max=input$minmaxGaussianMix[2] print(mepsilon) print(min) print(max) gaussianMixAnalysis<-AFMImageGaussianMixAnalysis() gaussianMixAnalysis@minGaussianMix<-min gaussianMixAnalysis@maxGaussianMix<-max gaussianMixAnalysis@epsilonGaussianMix<-mepsilon # Create a closure to update progress gaussianMixAnalysis@updateProgress<- function(value = NULL, detail = NULL, message = NULL) { if (exists("progressGaussianMix")){ if (!is.null(message)) { progressGaussianMix$set(message = message, value = 0) }else{ progressGaussianMix$set(value = value, detail = detail) } } } gaussianMixAnalysis<-performGaussianMixCalculation(AFMImageGaussianMixAnalysis= gaussianMixAnalysis, AFMImage= v$AFMImageAnalyser@AFMImage) print("done gaussianMixAnalysis") v$AFMImageAnalyser@gaussianMixAnalysis<-gaussianMixAnalysis print("done v$AFMImageAnalyser@gaussianMixAnalysis<-gaussianMixAnalysis") }) } }) output$downloadGaussianMixSummaryButton <- renderUI({ # If missing input, return to avoid error later in function #if(is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@mixAnalysis)) # return(NULL) downloadButton('exportGaussianMixSummary',label='Export Summary') }) output$exportGaussianMixSummary <- downloadHandler( filename = function() { paste(basename(v$AFMImageAnalyser@AFMImage@fullfilename), '-GaussianMixes-summary.xlsx', sep='') }, content = function(file) { filenameExportGaussianMixes<-file res<-v$AFMImageAnalyser@gaussianMixAnalysis@summaryMixture write.xlsx(data.frame(res), file=filenameExportGaussianMixes, row.names=FALSE) print("done exportGaussianMixSummary") } ) output$downloadGaussianMixHeightsButton <- 
renderUI({ # If missing input, return to avoid error later in function #if(is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@mixAnalysis)) # return(NULL) downloadButton('exportGaussianMixHeights',label='Export Heights') }) output$exportGaussianMixHeights <- downloadHandler( filename = function() { paste(basename(v$AFMImageAnalyser@AFMImage@fullfilename), '-GaussianMixes-heights.csv', sep='') }, content = function(file) { filenameExportGaussianMixes<-file heights<-v$AFMImageAnalyser@AFMImage@data$h oneSheetName<-paste0("heights-counts") write.csv( data.frame(heights=heights), file=filenameExportGaussianMixes, row.names=FALSE) print("done exportGaussianMixCDF") } ) output$downloadGaussianMixCDFCheckButton <- renderUI({ # If missing input, return to avoid error later in function #if(is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@mixAnalysis)) # return(NULL) downloadButton('exportGaussianMixCDF',label='Export CDF') }) output$exportGaussianMixCDF <- downloadHandler( filename = function() { paste(basename(v$AFMImageAnalyser@AFMImage@fullfilename), '-GaussianMixes-CDF.xlsx', sep='') }, content = function(file) { filenameExportGaussianMixes<-file totalNbOfMixtures<-length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix) - length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix[sapply(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix, is.null)]) print(totalNbOfMixtures) for (mixtureNumberOfComponents in seq(v$AFMImageAnalyser@gaussianMixAnalysis@minGaussianMix,length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix))) { baseSheetName<-paste0(mixtureNumberOfComponents,"-components-") print(paste("mixtureNumberOfComponents= ",mixtureNumberOfComponents)) if (!is.null(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix[mixtureNumberOfComponents][[1]])) { TheExpDT<-v$AFMImageAnalyser@gaussianMixAnalysis@tcdfsEcdfsCheck[[mixtureNumberOfComponents]] oneSheetName<-paste0(baseSheetName,"tcdfs-ecdfs") write.xlsx2(data.frame(tcdfs=TheExpDT$tcdfs, ecdfs= TheExpDT$ecdfs), file=filenameExportGaussianMixes, sheetName=oneSheetName, append=TRUE, row.names=FALSE) } } print("done exportGaussianMixCDF") } ) output$downloadGaussianMixDensityCheckButton <- renderUI({ # If missing input, return to avoid error later in function #if(is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@mixAnalysis)) # return(NULL) downloadButton('exportGaussianMixDensity',label='Export Density') }) output$exportGaussianMixDensity <- downloadHandler( filename = function() { paste(basename(v$AFMImageAnalyser@AFMImage@fullfilename), '-GaussianMixes-density.xlsx', sep='') }, content = function(file) { filenameExportGaussianMixes<-file totalNbOfMixtures<-length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix) - length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix[sapply(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix, is.null)]) print(totalNbOfMixtures) for (mixtureNumberOfComponents in seq(v$AFMImageAnalyser@gaussianMixAnalysis@minGaussianMix,length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix))) { baseSheetName<-paste0(mixtureNumberOfComponents,"-components-") print(paste("mixtureNumberOfComponents= ",mixtureNumberOfComponents)) if (!is.null(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix[mixtureNumberOfComponents][[1]])) { allHeights<-v$AFMImageAnalyser@gaussianMixAnalysis@densityCurvesAllHeights[[mixtureNumberOfComponents]] oneSheetName<-paste0(baseSheetName,"density-heights") write.xlsx2(data.frame(density=allHeights$x, heights= allHeights$y,curve=allHeights$style), 
file=filenameExportGaussianMixes, sheetName=oneSheetName, append=TRUE, row.names=FALSE) } } print("done performGaussianMixCalculationExport") } ) output$downloadGaussianMixCountsCheckButton <- renderUI({ # If missing input, return to avoid error later in function #if(is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@mixAnalysis)) # return(NULL) downloadButton('exportGaussianMixCounts',label='Export Counts') }) output$exportGaussianMixCounts <- downloadHandler( filename = function() { paste(basename(v$AFMImageAnalyser@AFMImage@fullfilename), '-GaussianMixes-Counts.xlsx', sep='') }, content = function(file) { filenameExportGaussianMixes<-file res<-v$AFMImageAnalyser@gaussianMixAnalysis@summaryMixture write.xlsx2(data.frame(res), file=filenameExportGaussianMixes, sheetName="Summary", row.names=FALSE) totalNbOfMixtures<-length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix) - length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix[sapply(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix, is.null)]) print(totalNbOfMixtures) for (mixtureNumberOfComponents in seq(v$AFMImageAnalyser@gaussianMixAnalysis@minGaussianMix,length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix))) { baseSheetName<-paste0(mixtureNumberOfComponents,"-components-") print(paste("mixtureNumberOfComponents= ",mixtureNumberOfComponents)) if (!is.null(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix[mixtureNumberOfComponents][[1]])) { allComponents<-v$AFMImageAnalyser@gaussianMixAnalysis@eachComponentsCounts[[mixtureNumberOfComponents]] oneSheetName<-paste0(baseSheetName,"components-counts") write.xlsx2(data.frame(allComponents), file=filenameExportGaussianMixes, sheetName=oneSheetName, append=TRUE, row.names=FALSE) } } print("done exportGaussianMixCounts") } ) # # PSD tab observer # output$downloadPSDPSDButton <- renderUI({ # If missing input, return to avoid error later in function if(is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@psdAnalysis)) return(NULL) downloadButton('exportPSD',label='Export PSD') }) output$exportPSD <- downloadHandler( filename = function() { paste(basename(v$AFMImageAnalyser@AFMImage@fullfilename), '.csv', sep='') }, content = function(file) { write.csv(v$AFMImageAnalyser@psdAnalysis@psd1d, file, row.names = FALSE) } ) output$downloadRoughnessVsLengthscalePSDButton <- renderUI({ # If missing input, return to avoid error later in function if(is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@psdAnalysis)) return(NULL) downloadButton('exportRoughnessVsLengthscale',label='Export roughness vs. 
lengthscale') }) output$exportRoughnessVsLengthscale <- downloadHandler( filename = function() { paste(basename(v$AFMImageAnalyser@AFMImage@fullfilename), '.csv', sep='') }, content = function(file) { write.csv(v$AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale, file, row.names = FALSE) } ) output$downloadRoughnessVsLengthscaleAnalysisPSDButton <- renderUI({ # If missing input, return to avoid error later in function if(is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@psdAnalysis)) return(NULL) downloadButton('exportRoughnessVsLengthscaleTangent',label='Export PSD analysis') }) output$exportRoughnessVsLengthscaleTangent <- downloadHandler( filename = function() { paste(basename(v$AFMImageAnalyser@AFMImage@fullfilename), '-AFMImageAnalyser','.Rda', sep='') }, content = function(aFilename) { print("to be done") AFMImageAnalyser<-copy(v$AFMImageAnalyser) save(AFMImageAnalyser, file=aFilename) } ) observeEvent(input$RoughnessByLengthScaleButton, { input$RoughnessByLengthScaleButton print("RoughnessByLengthScaleButton button pushed") if (is.null(input$RoughnessByLengthScaleButton)) { print("input$RoughnessByLengthScaleButton==NULL") return(NULL) } print("input$RoughnessByLengthScaleButton!=NULL") if(input$RoughnessByLengthScaleButton == c(0)) { print("input$RoughnessByLengthScaleButton==0") return() }else{ isolate({ input$RoughnessByLengthScaleButton # Create a Progress object progressPSD <- shiny::Progress$new() #progressPSD$set(message = "Calculting", value = 0) # Close the progress when this reactive exits (even if there's an error) on.exit(progressPSD$close()) print("calculation of PSD") print(paste("with", 2^input$breaksSliderPSD, "breaks")) #createAFMImageAnalyser() psdAnalysis<-AFMImagePSDAnalysis() # Create a closure to update progress psdAnalysis@updateProgress<- function(value = NULL, detail = NULL, message = NULL) { if (exists("progressPSD")){ if (!is.null(message)) { progressPSD$set(message = message, value = 0) }else{ progressPSD$set(value = value, detail = detail) } } } psdAnalysis@psd1d_breaks<-2^input$breaksSliderPSD psdAnalysis@psd2d_truncHighLengthScale<-TRUE psdAnalysis<-performAllPSDCalculation(AFMImagePSDAnalysis= psdAnalysis, AFMImage= v$AFMImageAnalyser@AFMImage) print("done psdAnalysis") v$AFMImageAnalyser@psdAnalysis<-psdAnalysis print("done v$AFMImageAnalyser@psdAnalysis<-psdAnalysis") }) } }) observeEvent(input$RoughnessByLengthScaleButton, { input$RoughnessByLengthScaleButton print("RoughnessByLengthScaleButton button pushed") if (is.null(input$RoughnessByLengthScaleButton)) { print("input$RoughnessByLengthScaleButton==NULL") return(NULL) } print("input$RoughnessByLengthScaleButton!=NULL") if(input$RoughnessByLengthScaleButton == c(0)) { print("input$RoughnessByLengthScaleButton==0") return() }else{ isolate({ input$RoughnessByLengthScaleButton # Create a Progress object progressPSD <- shiny::Progress$new() #progressPSD$set(message = "Calculting", value = 0) # Close the progress when this reactive exits (even if there's an error) on.exit(progressPSD$close()) print("calculation of tangents PSD") #print(paste("with", 2^input$breaksSliderPSD, "breaks")) #createAFMImageAnalyser() psdAnalysis<-AFMImagePSDAnalysis() # Create a closure to update progress psdAnalysis@updateProgress<- function(value = NULL, detail = NULL, message = NULL) { if (exists("progressPSD")){ if (!is.null(message)) { progressPSD$set(message = message, value = 0) }else{ progressPSD$set(value = value, detail = detail) } } } psdAnalysis@psd1d_breaks<-2^input$breaksSliderPSD 
psdAnalysis@psd2d_truncHighLengthScale<-TRUE psdAnalysis<-performAllPSDCalculation(AFMImagePSDAnalysis= psdAnalysis, AFMImage= v$AFMImageAnalyser@AFMImage) tryCatch({ intersection <- getAutoIntersectionForRoughnessAgainstLengthscale(AFMImageAnalyser, second_slope= FALSE) psdAnalysis@AFMImagePSDSlopesAnalysis1<-intersection intersection <- getAutoIntersectionForRoughnessAgainstLengthscale(AFMImageAnalyser, second_slope= TRUE) psdAnalysis@AFMImagePSDSlopesAnalysis2<-intersection # AFMImageAnalyser@psdAnalysis<-psdAnalysis # save(AFMImageAnalyser, file=paste0(dirOutput, sampleName,"-AFMImageAnalyser.Rdata")) }, error = function(e) {print(paste("Impossible to find PSD intersections automaticaly",e))}) print("done psdAnalysis") v$AFMImageAnalyser@psdAnalysis<-psdAnalysis print("done v$AFMImageAnalyser@psdAnalysis<-psdAnalysis") }) } }) observeEvent(input$RoughnessByLengthScaleAnalysisButton, { input$RoughnessByLengthScaleAnalysisButton print("RoughnessByLengthScaleAnalysisButton button pushed") if (is.null(input$RoughnessByLengthScaleAnalysisButton)) { print("input$RoughnessByLengthScaleAnalysisButton==NULL") return(NULL) } print("input$RoughnessByLengthScaleAnalysisButton!=NULL") if(input$RoughnessByLengthScaleAnalysisButton == c(0)) { print("input$RoughnessByLengthScaleAnalysisButton==0") return() }else{ isolate({ input$RoughnessByLengthScaleAnalysisButton # Create a Progress object progressPSD <- shiny::Progress$new() #progressPSD$set(message = "Calculting", value = 0) # Close the progress when this reactive exits (even if there's an error) on.exit(progressPSD$close()) print("calculation of tangents PSD") #print(paste("with", 2^input$breaksSliderPSD, "breaks")) #createAFMImageAnalyser() psdAnalysis<-AFMImagePSDAnalysis() # Create a closure to update progress psdAnalysis@updateProgress<- function(value = NULL, detail = NULL, message = NULL) { if (exists("progressPSD")){ if (!is.null(message)) { progressPSD$set(message = message, value = 0) }else{ progressPSD$set(value = value, detail = detail) } } } tryCatch({ tryCatch({ # intersection <- getAutoIntersectionForRoughnessAgainstLengthscale(AFMImageAnalyser, second_slope= FALSE) # v$AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis1<-intersection # intersection <- getAutoIntersectionForRoughnessAgainstLengthscale(AFMImageAnalyser, second_slope= TRUE) # v$AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis2<-intersection print("sliders") print(input$firstSlopeSliderPSD) print(input$lcSliderPSD) intersection <- getIntersectionForRoughnessAgainstLengthscale(v$AFMImageAnalyser, minValue= input$firstSlopeSliderPSD[1], maxValue= input$firstSlopeSliderPSD[2], second_slope= FALSE) v$AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis1<-intersection intersection <- getIntersectionForRoughnessAgainstLengthscale(v$AFMImageAnalyser, minValue= input$lcSliderPSD[1], maxValue= input$lcSliderPSD[2], second_slope= TRUE) v$AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis2<-intersection }, error = function(e) {print(paste("Error in PSD intersections calculation",e))}) }, error = function(e) {print(paste("Impossible to find PSD intersections automaticaly",e))}) print("done psdAnalysis") #v$AFMImageAnalyser@psdAnalysis<-psdAnalysis print("done v$AFMImageAnalyser@psdAnalysis<-psdAnalysis") }) } }) # # Variance checks tab # output$imageNameCheck<-renderUI({ imageName<-displayImageName() if (is.null(imageName)) { output$imageNameCheck<-renderUI(HTML(c("<h4>please select image first</h4>"))) return(NULL) } output$imageNameCheck<-imageName 
print(imageName) }) output$normalityVarianceCheckUI<-renderUI({ input$checkNormalityIsotropyCheckButton normalityVarianceCheckUI() }) normalityVarianceCheckUI<-reactive({ if (is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@variogramAnalysis)) { return(list()) } return(h3("Normality check")) }) output$isotropyVarianceCheckUI<-renderUI({ input$checkNormalityIsotropyCheckButton isotropyVarianceCheckUI() }) isotropyVarianceCheckUI<-reactive({ if (is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@variogramAnalysis)) { return(list()) } return(h3("Isotropy checks")) }) output$normalityIsotropyVarianceCheckImage<- renderImage({ normalityIsotropyVarianceCheckImage() }, deleteFile = TRUE) normalityIsotropyVarianceCheckImage <- reactive({ input$checkNormalityIsotropyCheckButton print("checkNormalityIsotropyCheckButton button pushed") if (is.null(input$checkNormalityIsotropyCheckButton)) { print("input$checkNormalityIsotropyCheckButton==NULL") return(NULL) } print("input$checkNormalityIsotropyCheckButton!=NULL") if (is.null(v$AFMImageAnalyser)|| is.null(v$AFMImageAnalyser@variogramAnalysis)|| length(v$AFMImageAnalyser@variogramAnalysis@directionalVariograms)==0 #|| input$checkNormalityIsotropyCheckButton == c(0) ) { return(list(src = tempfile())) }else{ outfile1 <- tempfile(fileext='.png') png(outfile1, width=640, height=400) checkNormality(AFMImage= v$AFMImageAnalyser@AFMImage, v$AFMImageAnalyser@variogramAnalysis) dev.off() return(list(src = outfile1, contentType = 'image/png', width = 640, height = 400, alt = "fd2d_squareincr")) } return(list(src = tempfile())) }) output$directionalVariogramsVarianceCheckImage<- renderPlot({ directionalVariogramsVarianceCheckPlot() }) directionalVariogramsVarianceCheckPlot <- reactive({ input$checkNormalityIsotropyCheckButton print("checkNormalityIsotropyCheckButton button pushed") if (is.null(input$checkNormalityIsotropyCheckButton)) { print("input$checkNormalityIsotropyCheckButton==NULL") return(NULL) } print("input$checkNormalityIsotropyCheckButton!=NULL") if (is.null(v$AFMImageAnalyser)|| is.null(v$AFMImageAnalyser@variogramAnalysis)|| is.null(v$AFMImageAnalyser@variogramAnalysis@directionalVariograms)|| length(v$AFMImageAnalyser@variogramAnalysis@directionalVariograms)==0) { #|| input$checkNormalityIsotropyCheckButton == c(0) return(NULL) }else{ p2 <- ggplot(v$AFMImageAnalyser@variogramAnalysis@directionalVariograms, aes(x=dist, y=gamma, color= as.factor(dir.hor), shape=as.factor(dir.hor))) p2 <- p2 + expand_limits(y = 0) p2 <- p2 + geom_point() p2 <- p2 + geom_line() p2 <- p2 + ylab("semivariance (nm^2)") p2 <- p2 + xlab("distance (nm)") p2 <- p2 + ggtitle("Directional variograms") return(p2) } return(NULL) }) observeEvent(input$checkNormalityIsotropyCheckButton, { input$checkNormalityIsotropyCheckButton print("checkNormalityIsotropyCheckButton button pushed") if (is.null(input$checkNormalityIsotropyCheckButton)) { print("input$checkNormalityIsotropyCheckButton==NULL") return(NULL) } print("input$checkNormalityIsotropyCheckButton!=NULL") if(input$checkNormalityIsotropyCheckButton == c(0)) { print("input$checkNormalityIsotropyCheckButton==c(0)") }else{ isolate({ # Create a Progress object progressVariogramAnalysis <- shiny::Progress$new() # Close the progress when this reactive exits (even if there's an error) on.exit(progressVariogramAnalysis$close()) # Create a closure to update progress. 
print("Calculation of directional variograms") #createAFMImageAnalyser() sampleFitPercentage<-3.43/100 variogramAnalysis<-AFMImageVariogramAnalysis(sampleFitPercentage= sampleFitPercentage) variogramAnalysis@updateProgress<-function(value = NULL, detail = NULL, message = NULL) { if (!is.null(message)) { progressVariogramAnalysis$set(message = message, value = 0) }else{ progressVariogramAnalysis$set(value = value, detail = detail) } } variogramAnalysis@updateProgress(message="Calculating directional variograms", value=0) variogramAnalysis@updateProgress(value= 0, detail = "0/1") variogramAnalysis@directionalVariograms<- calculateDirectionalVariograms(AFMImage= sampleAFMImage(v$AFMImageAnalyser@AFMImage,input$sampleIsotropyVarianceCheckSlider),AFMImageVariogramAnalysis= variogramAnalysis) # if models were already calculated, do not erase them if(!is.null(v$AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram)&& length(v$AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram)!=0) { v$AFMImageAnalyser@variogramAnalysis@directionalVariograms<-variogramAnalysis@directionalVariograms }else{ v$AFMImageAnalyser@variogramAnalysis<-variogramAnalysis } print("done") }) } return(NULL) }) # # Variance model tab # output$imageNameVarianceModels<-renderUI({ imageName<-displayImageName() if (is.null(imageName)) { output$imageNameVarianceModels<-renderUI(HTML(c("<h4>please select image first</h4>"))) return(NULL) } output$imageNameVarianceModels<-imageName print(imageName) }) output$bestmodeltableVarianceModelsUI<-renderUI({ input$fitVariogramVarianceModelsButton bestmodeltableVarianceModelsUI() }) bestmodeltableVarianceModelsUI<-reactive({ if (is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@variogramAnalysis@variogramModels)) { return(list()) } return(h3("Variogram models")) }) output$bestmodeltableVarianceModelsPlot <- renderTable({ input$fitVariogramVarianceModelsButton bestmodeltableVarianceModelsPlot() }, include.rownames=FALSE, include.colnames=TRUE) bestmodeltableVarianceModelsPlot <- reactive({ input$fitVariogramVarianceModelsButton print("fitVariogramVarianceModelsButton button pushed") if (is.null(input$fitVariogramVarianceModelsButton)) { print("input$fitVariogramVarianceModelsButton==NULL") return(NULL) } print("input$fitVariogramVarianceModelsButton!=NULL") # if(input$fitVariogramVarianceModelsButton == c(0)) { # print("input$fitVariogramVarianceModelsButton==c(0)") # return(NULL) # } if (is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@variogramAnalysis@variogramModels)) { return(NULL) } # get variogram model evaluation if (length(v$AFMImageAnalyser@variogramAnalysis@variogramModels)!=0) { mergedDT<-getDTModelEvaluation(v$AFMImageAnalyser@variogramAnalysis) #print(mergedDT) sillrangeDT<-getDTModelSillRange(v$AFMImageAnalyser@variogramAnalysis) setkey(sillrangeDT, "model") name<-press<-NULL sampleDT <- mergedDT[name==basename(v$AFMImageAnalyser@AFMImage@fullfilename)] setkey(sampleDT, "model") #sampleDT <- sampleDT[cor>0.98] sampleDT<-merge(sampleDT, sillrangeDT, by="model") sampleDT<-sampleDT[,name:=NULL] sampleDT <- unique(sampleDT) sampleDT <- sampleDT[order(-rank(cor), rank(press))] #print(sampleDT) tot<-sum(v$AFMImageAnalyser@AFMImage@data$h) summarySampleDT<-copy(sampleDT) summarySampleDT$press<-round(sampleDT$press) summarySampleDT$sill<-round(sampleDT$sill) summarySampleDT$range<-round(sampleDT$range) print("plotting variogram table...") fractalDF<-data.frame(summarySampleDT) #print(fractalDF) return({xtable(fractalDF)}) }else{ return(NULL) } }) 
getSpplotColors<-function(colLimit) { blues9[3:colLimit] } output$allmodelsModelImage<-renderImage({ allmodelsModelImage() }) allmodelsModelImage<- reactive({ input$fitVariogramVarianceModelsButton print("fitVariogramVarianceModelsButton button pushed") if (is.null(input$fitVariogramVarianceModelsButton)) { print("input$fitVariogramVarianceModelsButton==NULL") outfile1 <- tempfile(fileext='.png') png(outfile1, width=800, height=300) dev.off() return(list(src = outfile1, contentType = 'image/png', width = 800, height = 300, alt = "Models not calculated")) } print("input$fitVariogramVarianceModelsButton!=NULL") # if(input$fitVariogramVarianceModelsButton == c(0)) { # print("input$fitVariogramVarianceModelsButton==c(0)") # outfile1 <- tempfile(fileext='.png') # png(outfile1, width=800, height=300) # dev.off() # return(list(src = outfile1, # contentType = 'image/png', # width = 800, # height = 300, # alt = "Models not calculated")) # } if (is.null(v$AFMImageAnalyser)|| is.null(v$AFMImageAnalyser@variogramAnalysis)|| is.null(v$AFMImageAnalyser@variogramAnalysis@variogramModels)) { outfile1 <- tempfile(fileext='.png') png(outfile1, width=800, height=300) dev.off() return(list(src = outfile1, contentType = 'image/png', width = 800, height = 300, alt = "Models not calculated")) } # get variogram model evaluation if (length(v$AFMImageAnalyser@variogramAnalysis@variogramModels)!=0) { mergedDT<-getDTModelEvaluation(v$AFMImageAnalyser@variogramAnalysis) #print(mergedDT) sillrangeDT<-getDTModelSillRange(v$AFMImageAnalyser@variogramAnalysis) setkey(sillrangeDT, "model") name<-press<-NULL sampleDT <- mergedDT[name==basename(v$AFMImageAnalyser@AFMImage@fullfilename)] setkey(sampleDT, "model") #sampleDT <- sampleDT[cor>0.98] sampleDT<-merge(sampleDT, sillrangeDT, by="model") sampleDT<-sampleDT[,name:=NULL] sampleDT <- unique(sampleDT) sampleDT <- sampleDT[order(-rank(cor), rank(press))] ##################### # new page for experimental variogram and models numberOfModelsPerPage=length(v$AFMImageAnalyser@variogramAnalysis@variogramModels) allVarioModels<-str_sub(sampleDT$model,-3) outfile1 <- tempfile(fileext='.png') #print(outfile1) png(outfile1, width=800, height=numberOfModelsPerPage*300) #grid.newpage() printVariogramModelEvaluations(AFMImageAnalyser= v$AFMImageAnalyser, sampleDT= sampleDT, numberOfModelsPerPage= numberOfModelsPerPage) dev.off() return(list(src = outfile1, contentType = 'image/png', width = 800, height = 1600, alt = "all evaluated models")) } }) observeEvent(input$fitVariogramVarianceModelsButton, { input$fitVariogramVarianceModelsButton print("fitVariogramVarianceModelsButton button pushed") if (is.null(input$fitVariogramVarianceModelsButton)) { print("input$fitVariogramVarianceModelsButton==NULL") return(NULL) } print("input$fitVariogramVarianceModelsButton!=NULL") if(input$fitVariogramVarianceModelsButton == c(0)) { print("input$fitVariogramVarianceModelsButton==c(0)") }else{ isolate({ # Create a Progress object progressVariogramAnalysis <- shiny::Progress$new() # Close the progress when this reactive exits (even if there's an error) on.exit(progressVariogramAnalysis$close()) #createAFMImageAnalyser() sampleFitPercentage<-input$sampleFitVarianceModelsSlider/100 variogramAnalysis<-AFMImageVariogramAnalysis(sampleFitPercentage= sampleFitPercentage) variogramAnalysis@updateProgress<-function(value = NULL, detail = NULL, message = NULL) { if (is.null(value)&& is.null(detail)&& is.null(message)) return(TRUE) if (!is.null(message)) { progressVariogramAnalysis$set(message = message, value = 
0) }else{ progressVariogramAnalysis$set(value = value, detail = detail) } } variogramAnalysis@updateProgress(message="Calculating omnidirectional variogram", value=0) variogramAnalysis@omnidirectionalVariogram<- calculateOmnidirectionalVariogram(AFMImage=v$AFMImageAnalyser@AFMImage,AFMImageVariogramAnalysis= variogramAnalysis) variogramAnalysis@updateProgress(message="Fitting and evaluating",value=0) # normality and isotropy already calculated if(!is.null(v$AFMImageAnalyser@variogramAnalysis@directionalVariograms)&& length(v$AFMImageAnalyser@variogramAnalysis@directionalVariograms)!=0) { variogramAnalysis@directionalVariograms<-v$AFMImageAnalyser@variogramAnalysis@directionalVariograms } v$AFMImageAnalyser@variogramAnalysis<-evaluateVariogramModels(variogramAnalysis, v$AFMImageAnalyser@AFMImage) print("done") }) } return(NULL) }) # # File Tab display # output$basicInfoFileTable <- renderTable({ imageInformations()}, include.rownames=FALSE, include.colnames=FALSE) output$roughnessesFileTable <- renderTable({ roughnessesImagesTable()}, include.rownames=FALSE, include.colnames=FALSE) output$imageInformationsUI<-renderUI({ if (is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@AFMImage)) return(NULL) h3("Image informations") }) output$roughnessUI<-renderUI({ if (is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@AFMImage)) return(NULL) h3("Roughnesses") }) imageInformations <- function(){ if (is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@AFMImage)) return(NULL) scansize<-v$AFMImageAnalyser@AFMImage@scansize samplesperline<-as.character(v$AFMImageAnalyser@AFMImage@samplesperline) lines<-as.character(v$AFMImageAnalyser@AFMImage@lines) charact<-data.frame(name=c("scansize","samplesperline","lines"),values=c(paste0(scansize,"nm"),paste0(samplesperline,"px"),paste0(lines,"px"))) return({xtable(charact)}) } roughnessesImagesTable <- function(){ if (is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@AFMImage)) return(NULL) roughnesses<-getRoughnessParameters(v$AFMImageAnalyser@AFMImage) roughnessDF<-data.frame(name=c("total Rrms","Ra (mean roughness)"),values=c(paste0(round(roughnesses$totalRMSRoughness_TotalRrms, digits=4),"nm"),paste0(round(roughnesses$MeanRoughness_Ra, digits=4),"nm"))) return({xtable(roughnessDF)}) } # # Gaussian Mix Tab display # output$imageNameGaussianMix<-renderUI({ imageName<-displayImageName() if (is.null(imageName)) { output$imageNamePSD<-renderUI(HTML(c("<h4>please select image first</h4>"))) return(NULL) } output$imageNameGaussianMix<-imageName #print(imageName) }) output$plotGaussianMixUI<-renderUI({ myplotGaussianMixUI() }) myplotGaussianMixUI<-reactive({ if (is.null(v$AFMImageAnalyser)|| is.null(v$AFMImageAnalyser@gaussianMixAnalysis)) { return(NULL) } h3("Mixtures") }) output$summaryGaussianMixUI<-renderUI({ mysummaryGaussianMixUI() }) mysummaryGaussianMixUI<-reactive({ if (is.null(v$AFMImageAnalyser)|| is.null(v$AFMImageAnalyser@gaussianMixAnalysis)) { return(NULL) } h3("Summary") }) output$plotGaussianMixPlot <- renderPlot({ my_plotGaussianMixPlot() }) my_plotGaussianMixPlot <- reactive({ if (is.null(input$calculateGaussianMixButton)) { print("input$calculateGaussianMixButton==NULL") return(NULL) } print("input$calculateGaussianMixButton!=NULL") if (is.null(v$AFMImageAnalyser)) { #print("gaussianMixSummary is.null(v$AFMImageAnalyser") return(NULL) } if (is.null(v$AFMImageAnalyser@gaussianMixAnalysis)) { #print("gaussianMixSummary is.null(v$AFMImageAnalyser@gaussianMixAnalysis") return(NULL) } if 
(length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix)==0) { print("length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix)==0") return(NULL) } if(input$calculateGaussianMixButton == c(0)) { print("my_plotGaussianMixTable input$calculateGaussianMixButton==0") }else{ isolate({ if (is.null(v$AFMImageAnalyser)) { print("is.null(v$AFMImageAnalyser)") }else{ if (is.null(v$AFMImageAnalyser@gaussianMixAnalysis)) { print("is.null(v$AFMImageAnalyser@gaussianMixAnalysis)") }else{ heights<-v$AFMImageAnalyser@AFMImage@data$h distinct.heights <- sort(unique(heights)) totalNbOfMixtures<-length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix) #totalNbOfMixtures<-4 #count number a non element in list totalNbOfMixtures<-length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix) totalNbOfMixtures<-length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix) - length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix[sapply(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix, is.null)]) print(totalNbOfMixtures) grobList <- vector("list", (totalNbOfMixtures)*3) listNb<-0 for (mixtureNumberOfComponents in seq(v$AFMImageAnalyser@gaussianMixAnalysis@minGaussianMix,length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix))) { #mixtureNumberOfComponents<-2 print(paste("mixtureNumberOfComponents= ",mixtureNumberOfComponents)) if (!is.null(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix[mixtureNumberOfComponents][[1]])) { TheExpDT<-v$AFMImageAnalyser@gaussianMixAnalysis@tcdfsEcdfsCheck[[mixtureNumberOfComponents]] p1 <- ggplot(data=TheExpDT) p1 <- p1 + geom_point(aes(tcdfs, ecdfs, colour = "blue"),data=TheExpDT, show.legend = FALSE) p1 <- p1 + ylab("Empirical CDF") p1 <- p1 + geom_abline(slope=1, intercept = 0) p1 <- p1 + xlab("Theoretical CDF") listNb<-listNb+1 grobList[[listNb]] <- p1 allHeights<-v$AFMImageAnalyser@gaussianMixAnalysis@densityCurvesAllHeights[[mixtureNumberOfComponents]] p2<-ggplot(allHeights, aes(x=x, y=y)) + geom_line(alpha=0.8,size=1.2, aes(color=style)) p2 <- p2 + ylab("Density") p2 <- p2 + xlab("Heights (nm)") p2 <- p2 + theme(legend.title=element_blank()) p2 listNb<-listNb+1 grobList[[listNb]] <- p2 allComponents<-v$AFMImageAnalyser@gaussianMixAnalysis@eachComponentsCounts[[mixtureNumberOfComponents]] p3 <- ggplot(data=allComponents) p3 <- p3 + geom_point(data=allComponents, aes(heights, counts), size=1.05, color="#FF0000") p3 <- p3 + geom_histogram(data= data.frame(heights=heights), aes(x=heights), binwidth=1, color="#000000", fill="#000080", alpha=0.4) listNb<-listNb+1 grobList[[listNb]] <- p3 } } grid.arrange(grobs = grobList, ncol=3,widths = c(1,1,1)) }} }) } return(NULL) }) output$gaussianMixSummary <- renderPrint({ if (is.null(input$calculateGaussianMixButton)) { print("input$calculateGaussianMixButton==NULL") return(NULL) } #print("gaussianMixSummary input$calculateGaussianMixButton!=NULL") if (is.null(v$AFMImageAnalyser)) { #print("gaussianMixSummary is.null(v$AFMImageAnalyser") return(NULL) } if (is.null(v$AFMImageAnalyser@gaussianMixAnalysis)) { #print("gaussianMixSummary is.null(v$AFMImageAnalyser@gaussianMixAnalysis") return(NULL) } if (length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix)==0) { #print("gaussianMixSummary length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix)==0") return(NULL) } if (is.null(v$AFMImageAnalyser)) { print("gaussianMixSummary is.null(v$AFMImageAnalyser)") }else{ if (is.null(v$AFMImageAnalyser@gaussianMixAnalysis)) { print("gaussianMixSummary is.null(v$AFMImageAnalyser@gaussianMixAnalysis)") }else{ 
res=data.table(number_of_components=c(0), #component=c(0), mean=c(0), sd=c(0), lambda=c(0)) totalNbOfMixtures<-length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix) #totalNbOfMixtures<-length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix) - length(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix[sapply(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix, is.null)]) + 1 for (mixtureNumberOfComponents in seq(2,totalNbOfMixtures)) { if (!is.null(v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix[mixtureNumberOfComponents][[1]])) { mixture<-v$AFMImageAnalyser@gaussianMixAnalysis@gaussianMix[mixtureNumberOfComponents][[1]] for(component.number in seq(1, mixtureNumberOfComponents)) { if (length(mixture)>0) { mean=mixture$mu[component.number] sd=mixture$sigma[component.number] lambda=mixture$lambda[component.number] res=rbind(res, data.table(number_of_components=mixtureNumberOfComponents, #component=component.number, mean=mean, sd=sd, lambda=lambda)) } } } } res<-res[-1,] res<-res[order(number_of_components, mean)] res } } #summary(Muts) }) # # PSD Tab display # output$imageNamePSD<-renderUI({ imageName<-displayImageName() if (is.null(imageName)) { output$imageNamePSD<-renderUI(HTML(c("<h4>please select image first</h4>"))) return(NULL) } output$imageNamePSD<-imageName #print(imageName) }) output$plotPSDUI<-renderUI({ myPlotPSDUI() }) myPlotPSDUI<-reactive({ if (is.null(v$AFMImageAnalyser)|| is.null(v$AFMImageAnalyser@psdAnalysis)) { return(NULL) } h3("Power spectrum density") }) output$plotPSDRvsLUI<-renderUI({ myplotPSDRvsLUI() }) myplotPSDRvsLUI<-reactive({ if (is.null(v$AFMImageAnalyser)|| is.null(v$AFMImageAnalyser@psdAnalysis)) { return(NULL) } h3("Roughness vs. lengthscale") }) output$plotPSD <- renderPlot({ my_PSD_plot() }) my_PSD_plot <- reactive({ if (is.null(input$RoughnessByLengthScaleButton)) { print("input$RoughnessByLengthScaleButton==NULL") return(NULL) } print("input$RoughnessByLengthScaleButton!=NULL") if (length(v$AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale)==0) { print("length(v$AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale)==0") return(NULL) } # if(input$RoughnessByLengthScaleButton == c(0)) { # print("my_PSD_plot input$RoughnessByLengthScaleButton==0") # }else{ isolate({ if (is.null(v$AFMImageAnalyser)) { print("is.null(v$AFMImageAnalyser)") }else{ if (is.null(v$AFMImageAnalyser@psdAnalysis)) { print("is.null(v$AFMImageAnalyser@psdAnalysis)") }else{ datap<-v$AFMImageAnalyser@psdAnalysis@psd1d p <- ggplot(data=datap) p <- p + geom_point(aes(freq, PSD, color=type),data=datap[datap$type %in% c("PSD-2D")]) p <- p + geom_line(aes(freq, PSD, color=type),data=datap[datap$type %in% c("PSD-1D")],size=1.1) p <- p + scale_x_log10() p <- p + scale_y_log10() p <- p + ylab("PSD (nm^4)") p <- p + xlab("Frequency (nm^-1)") return(p) }} }) # } return(NULL) }) output$plotPSDRvsL <- renderPlot({ my_RoughnessVsLengthscale_plot() }) my_RoughnessVsLengthscale_plot <- reactive({ if (is.null(input$RoughnessByLengthScaleButton)) { print("input$RoughnessByLengthScaleButton==NULL") return(NULL) } print("input$RoughnessByLengthScaleButton!=NULL") print(length(v$AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale)) if (length(v$AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale)==0) { print("length(v$AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale)==0") return(NULL) } # if(input$RoughnessByLengthScaleButton == c(0)) # { # print("my_RoughnessVsLengthscale_plot input$RoughnessByLengthScaleButton==0") # }else{ isolate({ 
input$RoughnessByLengthScaleButton if (is.null(v$AFMImageAnalyser)) { print("is.null(v$AFMImageAnalyser)") }else{ if (is.null(v$AFMImageAnalyser@psdAnalysis)) { print("is.null(v$AFMImageAnalyser@psdAnalysis)") }else{ print("Displaying Roughness vs. lengthscale") r<-roughness<-filename<-NULL #v$AFMImagePSDAnalysis@roughnessAgainstLengthscale$filename<-v$localfullfilename p1 <- ggplot(v$AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale, aes(x=r, y=roughness, colour= "red")) p1 <- p1 + geom_point() p1 <- p1 + geom_line() p1 <- p1 + ylab("roughness (nm)") p1 <- p1 + xlab("lengthscale (nm)") p1 <- p1 + guides(color=FALSE) p1 <- p1 + scale_color_discrete(NULL) return(p1) }} }) # } return(NULL) }) output$plotAnalysisPSDRvsLUI<-renderUI({ myplotAnalysisPSDRvsLUI() }) myplotAnalysisPSDRvsLUI<-reactive({ if (is.null(v$AFMImageAnalyser)|| is.null(v$AFMImageAnalyser@psdAnalysis)) { return(NULL) } h3("Roughness vs. lengthscale") }) output$plotAnalysisPSDRvsL <- renderPlot({ my_RoughnessVsLengthscale_Analysisplot() }) my_RoughnessVsLengthscale_Analysisplot <- reactive({ if (is.null(input$RoughnessByLengthScaleAnalysisButton)) { print("input$RoughnessByLengthScaleAnalysisButton==NULL") return(NULL) } print("input$RoughnessByLengthScaleAnalysisButton!=NULL") print(length(v$AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale)) if (length(v$AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale)==0) { print("length(v$AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale)==0") return(NULL) } # if(input$RoughnessByLengthScaleAnalysisButton == c(0)) # { # print("my_RoughnessVsLengthscale_plot input$RoughnessByLengthScaleAnalysisButton==0") # }else{ isolate({ input$RoughnessByLengthScaleAnalysisButton if (is.null(v$AFMImageAnalyser)) { print("is.null(v$AFMImageAnalyser)") }else{ if (is.null(v$AFMImageAnalyser@psdAnalysis)) { print("is.null(v$AFMImageAnalyser@psdAnalysis)") }else{ print("Displaying Roughness vs. 
lengthscale with tangente") r<-roughness<-filename<-NULL #v$AFMImagePSDAnalysis@roughnessAgainstLengthscale$filename<-v$localfullfilename p1 <- ggplot(v$AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale, aes(x=r, y=roughness, colour= "red")) p1 <- p1 + geom_point() p1 <- p1 + geom_line() p1 <- p1 + ylab("roughness (nm)") p1 <- p1 + xlab("lengthscale (nm)") p1 <- p1 + guides(color=FALSE) p1 <- p1 + scale_color_discrete(NULL) if (length(v$AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis2)!=0){ aIntercept<-v$AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis2@yintersept aSlope<-v$AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis2@slope print(aIntercept) print(aSlope) print(v$AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis2@wsat) p1 <- p1 + geom_abline(intercept = aIntercept, slope = aSlope, size=1.2, linetype = 1) p1 <- p1 + geom_vline(xintercept = v$AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis2@lc, linetype = 2) p1 <- p1 + geom_hline(yintercept = v$AFMImageAnalyser@psdAnalysis@AFMImagePSDSlopesAnalysis2@wsat, size=1.2, linetype = 1) } return(p1) }} }) # } return(NULL) }) output$imageNameAnalysisPSD<-renderUI({ imageName<-displayImageName() if (is.null(imageName)) { output$imageNamePSD<-renderUI(HTML(c("<h4>please select image first</h4>"))) return(NULL) } output$imageNameAnalysisPSD<-imageName #print(imageName) }) # # 3D display # output$imageName3D<-renderUI({ imageName<-displayImageName() if (is.null(imageName)) { output$imageName3D<-renderUI(HTML(c("<h4>please select image first</h4>"))) return(NULL) } print(imageName) output$imageName3D<-imageName }) output$panel3DUI<-renderUI({ input$calculateFractalDimensionsButton myPanel3DUI() }) myPanel3DUI<-reactive({ if (is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@AFMImage)) { return(list()) } if (!HEADLESS) return(h3("Three dimensional display")) if ((v$AFMImageAnalyser@AFMImage@samplesperline>BrowserCanvasPixelLimit)|| (v$AFMImageAnalyser@AFMImage@lines>BrowserCanvasPixelLimit)) return(h3("Simplified three dimensional display")) else return(h3("Three dimensional display")) }) observeEvent(input$displayIn3D3DButton, { input$displayIn3D3DButton if (is.null(input$displayIn3D3DButton)) return(NULL) if (input$displayIn3D3DButton==c(0)) return(NULL) if(is.null(v$AFMImageAnalyser@AFMImage)) return(NULL) print("displayIn3D3DButton button pushed observeEvent") copyH<-copy(v$AFMImageAnalyser@AFMImage@data$h) width<-600 #AFMImage<-v$AFMImageAnalyser@AFMImage if (HEADLESS) AFMImage<-simplifyAFMImage(v$AFMImageAnalyser@AFMImage, BrowserCanvasPixelLimit,BrowserCanvasPixelLimit) else AFMImage<-v$AFMImageAnalyser@AFMImage # respect the proportion between horizontal / vertical distance and heigth newHeights <- input$height3Dslider*(AFMImage@data$h)*(AFMImage@samplesperline)/(AFMImage@scansize) minH<-min(newHeights) # TODO check validity of created image instead if(!is.na(minH)) { newH<-(newHeights-minH) y<-matrix(newH, nrow = AFMImage@lines, ncol = AFMImage@samplesperline) z <- seq(1,ncol(y),by=1) x <- (1:nrow(y)) ylim <- range(y) ylen <- ylim[2] - ylim[1] + 1 print(ylen) colorlut <- heat.colors(ylen, alpha = 1) # height color lookup table col <- colorlut[ y-ylim[1]+1 ] # assign colors to heights print(HEADLESS) if ((!HEADLESS)||is.null(rgl.cur())||rgl.cur()==0) { print("open rgl") rgl.open() }else{ print("using dev") dev <- rgl.cur() rgl.set(dev) rgl.clear() } bboxylen=3 if(ylim[2]<60) bboxylen=2 if (!HEADLESS) { par3d(windowRect = 100 + c( 0, 0, width, width ) ) rgl.bg(color = c("white"), back = "lines") 
rgl.bbox(color = c("#333333", "black"), emission = "#333333", specular = "#111111", shininess = 0, alpha = 0.6, xlen=0, zlen=0, ylen=bboxylen ) rgl.surface(x, z, y, color=col, back="lines") i<-130 rgl.viewpoint(i,i/4,zoom=1.1) }else{ if (!exists("mysurface")||is.null(mysurface)) { myb3d<-bg3d(color = c("white"), back = "lines") axes3d(color = c("#333333", "black"), labels=FALSE) # mybbox3d<-bbox3d(color = c("#333333", "black"), # emission = "#000000", # #specular = "#111111", # specular = "#000000", # front="line", back="line", # shininess = 0, draw_front=FALSE, alpha = 0.6, # xlen=0, zlen=0, ylen=bboxylen ) s3d<-surface3d(x, z, y, color=col, back="lines") print(paste("add", s3d)) root <- currentSubscene3d() newSubscene3d("inherit", "inherit", "inherit", copyShapes = TRUE, parent = root) clipplanes3d(1, 0, 0, 0) mysurface <- scene3d() plot3d(mysurface) }else{ print(paste("delete", s3d)) useSubscene3d(root) delFromSubscene3d(s3d) s3d<-surface3d(x, z, y, color=col, back="lines") } i<-130 rgl.viewpoint(i,i/4,zoom=1.1) } } v$AFMImageAnalyser@AFMImage@data$h<-copyH print("end display") # }) }) # output$thewidget <- renderRglwidget({ # if (is.null(input$displayIn3D3DButton)) return(rglwidget()) # if (input$displayIn3D3DButton==c(0)) return(rglwidget()) # if(is.null(v$AFMImageAnalyser@AFMImage)) return(rglwidget()) # print("displayIn3D3DButton button pushed renderRglwidget") # rglwidget() # print("end renderRglwidget") # }) # output$thecontroller <- # renderRglcontroller({ # # if (input$displayIn3D3DButton!=c(0)) # # # It makes more sense to use rglcontroller as below, but # # # this works... # playwidget("thewidget", respondTo = "height3Dslider") # }) # output$thewidget <- renderRglwidget({ if (HEADLESS) { if (is.null(input$displayIn3D3DButton)) return(NULL) if (input$displayIn3D3DButton==c(0)) return(NULL) if(is.null(v$AFMImageAnalyser@AFMImage)) return(NULL) # print("displayIn3D3DButton button pushed renderRglwidget") rglwidget(height=600, reuse=FALSE) #rglwidget(height=600, controllers="thecontroller") } }) output$snapshot3DButton <- downloadHandler( filename = function() { paste(basename(v$AFMImageAnalyser@AFMImage@fullfilename), '-multiply', input$height3Dslider,'-3D.png', sep='') }, content = function(file) { if(!is.null(v$AFMImageAnalyser@AFMImage)) { print("Exporting 3D png") rgl.snapshot(filename = file) print("done") } } ) observeEvent(input$calculate3DModel3DButton, { input$calculate3DModel3DButton print("calculate3DModel3DButton button pushed") if (is.null(input$calculate3DModel3DButton)) { print("input$calculate3DModel3DButton==NULL") return(NULL) } print("input$calculate3DModel3DButton!=NULL") if(input$calculate3DModel3DButton == c(0)) { print("input$calculate3DModel3DButton==0") return() }else{ isolate({ input$calculate3DModel3DButton # Create a Progress object progressCalculate3DModel <- shiny::Progress$new() #progressPSD$set(message = "Calculting", value = 0) # Close the progress when this reactive exits (even if there's an error) on.exit(progressCalculate3DModel$close()) print("calculation of 3D Model") #createAFMImageAnalyser() modelAnalysis<-new ("AFMImage3DModelAnalysis") # Create a closure to update progress modelAnalysis@updateProgress<- function(value = NULL, detail = NULL, message = NULL) { if (!is.null(message)) { progressCalculate3DModel$set(message = message, value = 0) }else{ progressCalculate3DModel$set(value = value, detail = detail) } return(TRUE) } modelAnalysis@updateProgress(message="Calculating 3D faces", value=0) if(!is.null(v$AFMImageAnalyser@AFMImage)) { 
print("Exporting 3D printing model") copyH<-copy(v$AFMImageAnalyser@AFMImage@data$h) v$AFMImageAnalyser@AFMImage@data$h<-v$AFMImageAnalyser@AFMImage@data$h*input$height3Dslider modelAna<-calculate3DModel(AFMImage3DModelAnalysis= modelAnalysis, AFMImage= v$AFMImageAnalyser@AFMImage) v$AFMImageAnalyser@AFMImage@data$h<-copyH } print("calculate3DModel done") v$AFMImageAnalyser@threeDimensionAnalysis<-modelAna print("done v$AFMImageAnalyser@threeDimensionAnalysis<-modelAnalysis") shinyjs::enable("export3DModel3DButton") }) } }) output$export3DModel3DButton <- downloadHandler( filename = function() { paste(basename(v$AFMImageAnalyser@AFMImage@fullfilename), '-multiply', input$height3Dslider, '.stl', sep='') }, content = function(file) { if(!is.null(v$AFMImageAnalyser@AFMImage)) { print("Exporting 3D printing model") copyH<-copy(v$AFMImageAnalyser@AFMImage@data$h) v$AFMImageAnalyser@AFMImage@data$h<-v$AFMImageAnalyser@AFMImage@data$h*input$height3Dslider exportToSTL(AFMImage3DModelAnalysis=v$AFMImageAnalyser@threeDimensionAnalysis, AFMImage=v$AFMImageAnalyser@AFMImage, stlfullfilename=file) v$AFMImageAnalyser@AFMImage@data$h<-copyH print("done") } } ) # # Fractal tab observer # observeEvent(input$calculateFractalDimensionsButton, { input$calculateFractalDimensionsButton print("calculateFractalDimensionsButton button pushed") print("input$calculateFractalDimensionsButton!=NULL") if(input$calculateFractalDimensionsButton == c(0)) { print("input$calculateFractalDimensionsButton==0") return() }else{ # isolate({ input$calculateFractalDimensionsButton # Create a Progress object progressFractal <- shiny::Progress$new() # Close the progress when this reactive exits (even if there's an error) on.exit(progressFractal$close()) print("calculation of Fractal dimensions and scales") #createAFMImageAnalyser() # fractal dimension analysis fdAnalysis<-AFMImageFractalDimensionsAnalysis() # Create a closure to update progress fdAnalysis@updateProgress<-function(value = NULL, detail = NULL, message = NULL) { if (!is.null(message)) { progressFractal$set(message = message, value = 0) }else{ progressFractal$set(value = value, detail = detail) } } fdAnalysis@fractalDimensionMethods<-getFractalDimensions(v$AFMImageAnalyser@AFMImage, fdAnalysis) print("done v$AFMImageAnalyser@fdAnalysis<-fdAnalysis") # }) v$AFMImageAnalyser@fdAnalysis<-fdAnalysis } }) # # Fractal calculation display # output$imageNameFractal<-renderUI({ imageName<-displayImageName() if (is.null(imageName)) { output$imageNameFractal<-renderUI(HTML(c("<h4>please select image first</h4>"))) return(NULL) } output$imageNameFractal<-imageName # print(imageName) }) output$fractalDimensionsFractalTable <- renderTable({ input$calculateFractalDimensionsButton fractalDimensionsFractalTable() }, include.rownames=FALSE, include.colnames=TRUE) fractalDimensionsFractalTable <- reactive({ if (is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@fdAnalysis)) { return(NULL) } n<-length(v$AFMImageAnalyser@fdAnalysis@fractalDimensionMethods) if (n!=0) { sampleDT <- data.table( fd_method= c(sapply(1:n, function(i) v$AFMImageAnalyser@fdAnalysis@fractalDimensionMethods[[i]]@fd_method)), fd= c(sapply(1:n, function(i) v$AFMImageAnalyser@fdAnalysis@fractalDimensionMethods[[i]]@fd)), fd_scale= c(sapply(1:n, function(i) v$AFMImageAnalyser@fdAnalysis@fractalDimensionMethods[[i]]@fd_scale))) print(sampleDT) setkey(sampleDT, "fd_method") sampleDT <- unique(sampleDT) name<-NULL fractalDF<-data.frame(sampleDT[, name:=NULL]) return({xtable(fractalDF)}) }else{ return(NULL) } }) 
output$fractalDimensionsFractalUI<-renderUI({ input$calculateFractalDimensionsButton myfractalDimensionsFractalUI() }) myfractalDimensionsFractalUI<-reactive({ if (is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@fdAnalysis)) { return(list()) } return(h3("Fractal dimensions and scales")) }) output$fractalDimensionsFractalPlots_fd2d_isotropic <- renderImage({ input$calculateFractalDimensionsButton fractalDimensionsFractalPlots_fd2d_isotropic() }, deleteFile = TRUE) output$fractalDimensionsFractalPlots_fd2d_squareincr <- renderImage({ input$calculateFractalDimensionsButton fractalDimensionsFractalPlots_fd2d_squareincr() }, deleteFile = TRUE) output$fractalDimensionsFractalPlots_fd2d_filter1 <- renderImage({ input$calculateFractalDimensionsButton fractalDimensionsFractalPlots_fd2d_filter1() }, deleteFile = TRUE) fractalDimensionsFractalPlots_fd2d_isotropic <- reactive({ if (is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@fdAnalysis)) { return(list(src = tempfile())) } if (length(v$AFMImageAnalyser@fdAnalysis@fractalDimensionMethods)!=0) { # Create a Progress object progressFractal <- shiny::Progress$new() # Close the progress when this reactive exits (even if there's an error) on.exit(progressFractal$close()) # Create a closure to update progress. updateprogressFractal <- function(value = NULL, detail = NULL, message = NULL) { if (!is.null(message)) { progressFractal$set(message = message, value = 0) }else{ progressFractal$set(value = value, detail = detail) } } updateprogressFractal(message="2/2 - Calculating images", value=0) sampleName<-basename(v$AFMImageAnalyser@AFMImage@fullfilename) rf2d <- matrix(v$AFMImageAnalyser@AFMImage@data$h, nrow=v$AFMImageAnalyser@AFMImage@samplesperline) updateprogressFractal(value= 1/4, detail = "1/4") outfile1 <- tempfile(fileext='.png') print("saving 1") png(outfile1, width=400, height=300) fd2d_isotropic <- fd.estim.isotropic(rf2d, p.index = 1, direction='hvd+d-', plot.loglog = TRUE, plot.allpoints = TRUE) dev.off() print("done 1") return(list(src = outfile1, contentType = 'image/png', width = 400, height = 300, alt = "fd2d_isotropic") ) } return(list(src = tempfile())) }) fractalDimensionsFractalPlots_fd2d_squareincr <- reactive({ if (is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@fdAnalysis)) { return(list(src = tempfile())) } if (length(v$AFMImageAnalyser@fdAnalysis@fractalDimensionMethods)!=0) { # Create a Progress object progressFractal <- shiny::Progress$new() # Close the progress when this reactive exits (even if there's an error) on.exit(progressFractal$close()) # Create a closure to update progress. 
updateprogressFractal <- function(value = NULL, detail = NULL, message = NULL) { if (!is.null(message)) { progressFractal$set(message = message, value = 0) }else{ progressFractal$set(value = value, detail = detail) } } updateprogressFractal(message="2/2 - Calculating images", value=0) sampleName<-basename(v$AFMImageAnalyser@AFMImage@fullfilename) rf2d <- matrix(v$AFMImageAnalyser@AFMImage@data$h, nrow=v$AFMImageAnalyser@AFMImage@samplesperline) updateprogressFractal(value= 2/4, detail = "2/4") outfile1 <- tempfile(fileext='.png') print("saving 2") png(outfile1, width=400, height=300) fd2d_squareincr <- fd.estim.squareincr(rf2d, p.index = 1, plot.loglog = TRUE, plot.allpoints = TRUE) dev.off() print("done 2") return(list(src = outfile1, contentType = 'image/png', width = 400, height = 300, alt = "fd2d_squareincr") ) } return(list(src = tempfile())) }) fractalDimensionsFractalPlots_fd2d_filter1 <- reactive({ if (is.null(v$AFMImageAnalyser)||is.null(v$AFMImageAnalyser@fdAnalysis)) { return(list(src = tempfile())) } if (length(v$AFMImageAnalyser@fdAnalysis@fractalDimensionMethods)!=0) { # Create a Progress object progressFractal <- shiny::Progress$new() # Close the progress when this reactive exits (even if there's an error) on.exit(progressFractal$close()) # Create a closure to update progress. updateprogressFractal <- function(value = NULL, detail = NULL, message = NULL) { if (!is.null(message)) { progressFractal$set(message = message, value = 0) }else{ progressFractal$set(value = value, detail = detail) } } updateprogressFractal(message="2/2 - Calculating images", value=0) sampleName<-basename(v$AFMImageAnalyser@AFMImage@fullfilename) rf2d <- matrix(v$AFMImageAnalyser@AFMImage@data$h, nrow=v$AFMImageAnalyser@AFMImage@samplesperline) updateprogressFractal(value= 3/4, detail = "3/4") print("saving 3") outfile1 <- tempfile(fileext='.png') png(outfile1, width=400, height=300) fd2d_filter1 <- fd.estim.filter1(rf2d, p.index = 1, plot.loglog = TRUE, plot.allpoints = TRUE) dev.off() print("done 3") return(list(src = outfile1, contentType = 'image/png', width = 400, height = 300, alt = "fd2d_filter1") ) } return(list(src = tempfile())) }) # # # output$generateCheckReport <- downloadHandler( filename = function() { paste(basename(v$AFMImageAnalyser@AFMImage@fullfilename), '-checkReport.pdf', sep='') }, content = function(file) { if(!is.null(v$AFMImageAnalyser)) { print("Exporting check report") v$AFMImageAnalyser@fullfilename<-file generateAFMImageReport(AFMImageAnalyser= v$AFMImageAnalyser, reportFullfilename=file, isCheckReport = TRUE) print("done") } } ) output$generateReport <- downloadHandler( filename = function() { paste(basename(v$AFMImageAnalyser@AFMImage@fullfilename), '-fullReport.pdf', sep='') }, content = function(file) { if(!is.null(v$AFMImageAnalyser)) { print("Exporting full report") v$AFMImageAnalyser@fullfilename<-file generateAFMImageReport(AFMImageAnalyser= v$AFMImageAnalyser, reportFullfilename=file, isCheckReport = FALSE) print("done") } } ) output$alreadyCalculatedPlot <- renderTable({ if(is.null(v$AFMImageAnalyser)) { shinyjs::disable("generateCheckReport") shinyjs::disable("generateReport") return() } dataAvailable<-FALSE if(!is.null(v$AFMImageAnalyser@variogramAnalysis@directionalVariograms)&& length(v$AFMImageAnalyser@variogramAnalysis@directionalVariograms)!=0) { checkAvailable<-"Yes" dataAvailable<-TRUE } else checkAvailable<-"No" if(!is.null(v$AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram)&& 
length(v$AFMImageAnalyser@variogramAnalysis@omnidirectionalVariogram)!=0) { modelsAvalable<-"Yes" dataAvailable<-TRUE } else modelsAvalable<-"No" if(!is.null(v$AFMImageAnalyser@psdAnalysis)&& !is.null(v$AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale)&& length(v$AFMImageAnalyser@psdAnalysis@roughnessAgainstLengthscale)!=0) { print(v$AFMImageAnalyser@psdAnalysis) psdAvailable<-"Yes" dataAvailable<-TRUE } else psdAvailable<-"No" if(!is.null(v$AFMImageAnalyser@threeDimensionAnalysis)&& !is.null(v$AFMImageAnalyser@threeDimensionAnalysis@f1)&& length(v$AFMImageAnalyser@threeDimensionAnalysis@f1)!=0) { print(v$AFMImageAnalyser@threeDimensionAnalysis) threeDimensionAnalysisAvailable<-"Yes" dataAvailable<-TRUE } else threeDimensionAnalysisAvailable<-"No" if(!is.null(v$AFMImageAnalyser@fdAnalysis)&& length(v$AFMImageAnalyser@fdAnalysis@fractalDimensionMethods)!=0) { print(v$AFMImageAnalyser@fdAnalysis) fdAvailable<-"Yes" dataAvailable<-TRUE } else fdAvailable<-"No" if (dataAvailable) { shinyjs::enable("generateCheckReport") shinyjs::enable("generateReport") } availableCalculationsDF<-data.frame(Calculations=c("PSD analysis", "Normality and isotropy", "Variogram models", "Fractal analysis", "Networks analysis"), available=c(psdAvailable, checkAvailable, modelsAvalable, fdAvailable, threeDimensionAnalysisAvailable)) #print(availableCalculationsDF) return({xtable(availableCalculationsDF)}) }, include.rownames=FALSE, include.colnames=TRUE) # # Networks # output$imageNameNetworks<-renderUI({ imageName<-displayImageName() if (is.null(imageName)) { output$imageNameNetworks<-renderUI(HTML(c("<h4>please select image first</h4>"))) return(NULL) } heights<-v$AFMImageAnalyser@AFMImage@data$h*input$heightNetworksslider heights<-heights+abs(min(heights)) print("imageNameNetworks - not null imaheName") output$imageNameNetworks<-imageName #print(imageName) }) output$smallBranchesNetworksNetworksCheckboxInput<-renderUI({ imageName<-displayImageName() if (is.null(imageName)) { output$imageNameNetworks<-renderUI(HTML(c("<h4>please select image first</h4>"))) return(NULL) } checkboxInput("smallBranchesNetworksNetworksCheckboxInput", "Small branches") }) output$distNetworksPlot <- renderPlot({ if (is.null(v$AFMImageAnalyser)) { return() } heights<-v$AFMImageAnalyser@AFMImage@data$h*input$heightNetworksslider heights<-heights+abs(min(heights)) bins <- seq(min(heights), max(heights), length.out = 50) # draw the histogram with the specified number of bins hist(heights, breaks = bins, col = 'darkgray', border = 'white') updateSliderInput(session, 'filterNetworksslider', value = c(1,10), min = 0.1, max = ceiling(max(heights*110/100)), step = 0.1) }) output$newImageNetworksPlot <- renderPlot({ input$checkFilterNetworksButton print("checkFilterNetworksButton button pushed") if (is.null(input$checkFilterNetworksButton)) { print("input$checkFilterNetworksButton==NULL") return(NULL) } print("input$checkFilterNetworksButton!=NULL") if(input$checkFilterNetworksButton == c(0)) { print("input$checkFilterNetworksButton==0") return() }else{ if (is.null(v$AFMImageAnalyser)) { return() } isolate({ input$checkFilterNetworksButton newAFMImage<-v$AFMImageAnalyser@AFMImage heights<-newAFMImage@data$h*input$heightNetworksslider heights<-heights+abs(min(heights)) heights[heights<input$filterNetworksslider[1]]<-0 heights[heights>input$filterNetworksslider[2]]<-0 newAFMImage@data$h<-heights getSpplotFromAFMImage(newAFMImage, expectedWidth=512, expectHeight= 512, withoutLegend=TRUE) displayIn3D(newAFMImage) }) } }) 
output$skeletonImageNetworksPlot <- renderPlot({ if (is.null(v$AFMImageAnalyser)|| is.null(v$AFMImageAnalyser@networksAnalysis)|| is.null(v$AFMImageAnalyser@networksAnalysis@skeletonGraph) ) { return() } library(igraph) nbVertices=length(V(v$AFMImageAnalyser@networksAnalysis@skeletonGraph)) print(nbVertices) #gridIgraphPlot(AFMImage, v$AFMImageAnalyser@networksAnalysis@skeletonGraph) displayColoredNetworkWithVerticesSize(v$AFMImageAnalyser@networksAnalysis) }) observeEvent(input$calculateNetworksNetworksButton, { input$calculateNetworksNetworksButton print("calculateNetworksNetworksButton button pushed") if (is.null(input$calculateNetworksNetworksButton)) { print("input$calculateNetworksNetworksButton==NULL") return(NULL) } print("input$calculateNetworksNetworksButton!=NULL") if(input$calculateNetworksNetworksButton == c(0)) { print("input$calculateNetworksNetworksButton==0") return() }else{ isolate({ input$calculateNetworksNetworksButton # Create a Progress object progressCalculateNetworks <- shiny::Progress$new() #progressPSD$set(message = "Calculting", value = 0) # Close the progress when this reactive exits (even if there's an error) on.exit(progressCalculateNetworks$close()) print("calculation of Networks") #createAFMImageAnalyser() AFMImageNetworksAnalysis = new("AFMImageNetworksAnalysis") # Create a closure to update progress updateProgressNetworkAnalysis<- function(value = NULL, detail = NULL, message = NULL) { if (!is.null(message)) { progressCalculateNetworks$set(message = message, value = 0) }else{ progressCalculateNetworks$set(value = value, detail = detail) } return(TRUE) } AFMImageNetworksAnalysis@updateProgress<- updateProgressNetworkAnalysis # newAFMImage<-copy(v$AFMImageAnalyser@AFMImage) # # newAFMImage<-extractAFMImage(newAFMImage,0,0,184) # # heights<-newAFMImage@data$h*input$heightNetworksslider # heights<-heights+abs(min(heights)) # # heights[heights<input$filterNetworksslider[1]]<-0 # heights[heights>input$filterNetworksslider[2]]<-0 # # newAFMImage@data$h<-heights # # # networkAnalysis@heightNetworksslider=input$heightNetworksslider # networkAnalysis@filterNetworkssliderMin=input$filterNetworksslider[1] # networkAnalysis@filterNetworkssliderMax=input$filterNetworksslider[2] # # if(!is.null(v$AFMImageAnalyser@AFMImage)) { # print("Calculating networks") # networksAna<-calculateNetworks(AFMImageNetworksAnalysis= networkAnalysis, AFMImage= newAFMImage) # } newAFMImage<-copy(v$AFMImageAnalyser@AFMImage) #newAFMImage<-extractAFMImage(newAFMImage,0,0,80) AFMImageNetworksAnalysis@heightNetworksslider=input$heightNetworksslider AFMImageNetworksAnalysis@filterNetworkssliderMin=input$filterNetworksslider[1] AFMImageNetworksAnalysis@filterNetworkssliderMax=input$filterNetworksslider[2] AFMImageNetworksAnalysis@smallBranchesTreatment=TRUE AFMImageNetworksAnalysis@updateProgress(message="Transform image", value=0) AFMImageNetworksAnalysis@updateProgress(value= 0, detail = "1/8") AFMImageNetworksAnalysis<-transformAFMImageForNetworkAnalysis(AFMImageNetworksAnalysis, AFMImage= newAFMImage) newAFMImage<-AFMImageNetworksAnalysis@binaryAFMImage displayIn3D(newAFMImage, noLight=TRUE) if(!is.null(v$AFMImageAnalyser@AFMImage)) { print("Calculating networks") AFMImageNetworksAnalysis@updateProgress(message="Identify nodes") AFMImageNetworksAnalysis@updateProgress(value= 2, detail = "2/8") AFMImageNetworksAnalysis<-identifyNodesAndEdges(AFMImageNetworksAnalysis= AFMImageNetworksAnalysis, maxHeight= AFMImageNetworksAnalysis@filterNetworkssliderMax) # Create a closure to update progress 
AFMImageNetworksAnalysis@updateProgress<- updateProgressNetworkAnalysis AFMImageNetworksAnalysis@updateProgress(message="Identify edges") AFMImageNetworksAnalysis@updateProgress(value= 3, detail = "3/8") AFMImageNetworksAnalysis<-identifyEdgesFromCircles(AFMImageNetworksAnalysis= AFMImageNetworksAnalysis, MAX_DISTANCE = 75) AFMImageNetworksAnalysis@updateProgress<- updateProgressNetworkAnalysis AFMImageNetworksAnalysis@updateProgress(message="Identify isolated nodes", detail = "4/8") AFMImageNetworksAnalysis@updateProgress(value= 4) AFMImageNetworksAnalysis<-identifyIsolatedNodes(AFMImageNetworksAnalysis) AFMImageNetworksAnalysis@updateProgress<- updateProgressNetworkAnalysis AFMImageNetworksAnalysis@updateProgress(message="Create networks", detail = "5/8") AFMImageNetworksAnalysis@updateProgress(value= 5) AFMImageNetworksAnalysis<-createGraph(AFMImageNetworksAnalysis) AFMImageNetworksAnalysis@updateProgress<- updateProgressNetworkAnalysis AFMImageNetworksAnalysis@updateProgress(message="Calculate shortest path", detail = "6/8") AFMImageNetworksAnalysis@updateProgress(value= 6) AFMImageNetworksAnalysis<-calculateShortestPaths(AFMImageNetworksAnalysis=AFMImageNetworksAnalysis) AFMImageNetworksAnalysis@updateProgress<- updateProgressNetworkAnalysis AFMImageNetworksAnalysis@shortestPaths AFMImageNetworksAnalysis@updateProgress(message="Calculate network parameters and holes characteristics", detail = "7/8") AFMImageNetworksAnalysis@updateProgress(value= 7) AFMImageNetworksAnalysis<-calculateNetworkParameters(AFMImageNetworksAnalysis=AFMImageNetworksAnalysis, AFMImage=v$AFMImageAnalyser@AFMImage) AFMImageNetworksAnalysis@updateProgress<- updateProgressNetworkAnalysis AFMImageNetworksAnalysis@networksCharacteristics AFMImageNetworksAnalysis@graphEvcent AFMImageNetworksAnalysis@graphBetweenness AFMImageNetworksAnalysis<-calculateHolesCharacteristics(AFMImageNetworksAnalysis=AFMImageNetworksAnalysis) } print("calculation of networks done") v$AFMImageAnalyser@networksAnalysis<-AFMImageNetworksAnalysis print("done v$AFMImageAnalyser@networksAnalysis<-AFMImageNetworksAnalysis") }) } }) })
# ---- file: AFM/inst/shiny/AFM-desktop/server.R ----
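# Illustrative sketch (not from the package): the server above repeatedly
# guards its render* reactives with NULL/zero checks on action buttons and
# wraps the heavy work in isolate(), so plots are computed only when the
# user presses "Calculate". A minimal, self-contained version of that
# pattern, assuming only the shiny package:
library(shiny)

ui <- fluidPage(
  actionButton("calc", "Calculate"),
  plotOutput("plot")
)

server <- function(input, output) {
  output$plot <- renderPlot({
    # an actionButton starts at 0; draw nothing until the first click
    if (is.null(input$calc) || input$calc == 0) return(NULL)
    isolate({
      # heavy computation goes here; it reruns only when 'calc' changes
      hist(rnorm(100))
    })
  })
}

# shinyApp(ui, server)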
library(shiny) library(shinyjs) library(rgl) shinyUI( navbarPage('AFM Image Analysis', tabPanel('File', sidebarLayout( sidebarPanel( uiOutput('choose_inputtype'), uiOutput('choose_type'), uiOutput('choose_dataset'), tags$hr(), htmlOutput("displayIn3DFileButton"), #actionButton('displayIn3DFileButton', label = 'Display 3D model'), downloadButton('saveRdataFileButton', label = 'Export calculations') ), mainPanel( uiOutput('imageInformationsUI'), tableOutput('basicInfoFileTable'), uiOutput('roughnessUI'), tableOutput('roughnessesFileTable') # ,tags$script(src="AFM.js") ) )), tabPanel('Gaussian Mixtures', sidebarLayout( sidebarPanel( uiOutput('imageNameGaussianMix'), tags$hr(), numericInput("mepsilonGaussianMix", "Convergence criterion:", 1e-4, min = 1e-20, max = 1e-2), sliderInput("minmaxGaussianMix", label = h5("Number of components"), min = 2, max = 8, step = 1, value = c(2, 3)), tags$hr(), actionButton('calculateGaussianMixButton',label='Calculate'), tags$hr(), uiOutput('downloadGaussianMixSummaryButton'), uiOutput('downloadGaussianMixCDFCheckButton'), uiOutput('downloadGaussianMixDensityCheckButton'), uiOutput('downloadGaussianMixHeightsButton'), uiOutput('downloadGaussianMixCountsCheckButton') ) , mainPanel( tabPanel("Plot", fluidRow( uiOutput('plotGaussianMixUI'), plotOutput("plotGaussianMixPlot"), uiOutput('summaryGaussianMixUI'), verbatimTextOutput("gaussianMixSummary")) ) ) )), navbarMenu('PSD', tabPanel('Calculation', sidebarLayout( sidebarPanel( uiOutput('imageNamePSD'), tags$hr(), sliderInput('breaksSliderPSD', label = 'Breaks in PSD 2D to calculate PSD 1D', min = 1, max = 7, value = 5, step=1, ticks=FALSE), actionButton('RoughnessByLengthScaleButton', label = 'Calculate'), tags$hr(), uiOutput('downloadPSDPSDButton'), uiOutput('downloadRoughnessVsLengthscalePSDButton') ) , mainPanel( uiOutput('plotPSDUI'), plotOutput('plotPSD') , uiOutput('plotPSDRvsLUI'), plotOutput('plotPSDRvsL') ) )), tabPanel('Analysis', sidebarLayout( sidebarPanel( uiOutput('imageNameAnalysisPSD'), tags$hr(), sliderInput('firstSlopeSliderPSD', label = 'First tangent', min = 1, max = 124, value = c(1,32), step=1, ticks=FALSE), sliderInput('lcSliderPSD', label = 'Lc tangent', min = 1, max = 124, value = c(10,70), step=1, ticks=FALSE), actionButton('RoughnessByLengthScaleAnalysisButton', label = 'Calculate'), tags$hr(), uiOutput('downloadRoughnessVsLengthscaleAnalysisPSDButton') ) , mainPanel( uiOutput('plotAnalysisPSDRvsLUI'), plotOutput('plotAnalysisPSDRvsL') ) ) )), navbarMenu('Variance', tabPanel('Checks', sidebarLayout( sidebarPanel( uiOutput('imageNameCheck'), tags$hr(), sliderInput('sampleIsotropyVarianceCheckSlider', label = 'Sample to calculate directional variograms', min = 1, max = 100, value = 100, step=1), actionButton('checkNormalityIsotropyCheckButton',label='Check normality and isotropy') ) , mainPanel( uiOutput('normalityVarianceCheckUI'), imageOutput('normalityIsotropyVarianceCheckImage'), uiOutput('isotropyVarianceCheckUI'), plotOutput('directionalVariogramsVarianceCheckImage') ) ) ), tabPanel('Models', sidebarLayout( sidebarPanel( uiOutput('imageNameVarianceModels'), tags$hr(), sliderInput('sampleVariogramModelsSlider', label = 'Sample to calculate directional variograms', min = 1, max = 100, value = 100, step=1), sliderInput('sampleFitVarianceModelsSlider', label = 'Sample to fit models', min = 0, max = 4, value = 3.43, step=0.01), sliderInput('sampleValidateVarianceModelsSlider', label = 'Sample to validate models', min = 1, max = 100, value = 100), 
actionButton('fitVariogramVarianceModelsButton',label='Fit variogram models') ) , mainPanel( uiOutput('bestmodeltableVarianceModelsUI'), tableOutput('bestmodeltableVarianceModelsPlot'), imageOutput('allmodelsModelImage') ) ) ) ), tabPanel('Fractal', sidebarLayout( sidebarPanel( uiOutput('imageNameFractal'), tags$hr(), actionButton('calculateFractalDimensionsButton',label='Calculate') ) , mainPanel( uiOutput('fractalDimensionsFractalUI'), tableOutput('fractalDimensionsFractalTable'), imageOutput('fractalDimensionsFractalPlots_fd2d_isotropic'), imageOutput('fractalDimensionsFractalPlots_fd2d_squareincr'), imageOutput('fractalDimensionsFractalPlots_fd2d_filter1') ) )), tabPanel('Networks', sidebarLayout( sidebarPanel( uiOutput('imageNameNetworks'), registerSceneChange(), tags$hr(), sliderInput('heightNetworksslider', label = 'Height multiplier', min = 0.1, max = 10, value = 1, step=0.1), sliderInput('filterNetworksslider', label = 'Filter', min = 0.1, max = 10, value = c(1,10), step=0.1), actionButton('checkFilterNetworksButton', label = 'Check filter'), uiOutput('smallBranchesNetworksNetworksCheckboxInput'), actionButton('calculateNetworksNetworksButton', label = 'Calculate networks') ), mainPanel( uiOutput('panelNetworksUI'), plotOutput("skeletonImageNetworksPlot"), plotOutput("newImageNetworksPlot"), plotOutput("distNetworksPlot") ) )), tabPanel('3D', sidebarLayout( sidebarPanel( uiOutput('imageName3D'), registerSceneChange(), tags$hr(), sliderInput('height3Dslider', label = 'Height multiplier', min = 0.1, max = 10, value = 1, step=0.1), actionButton('displayIn3D3DButton', label = 'Display 3D image'), downloadButton('snapshot3DButton', label = 'Snapshot'), tags$hr(), actionButton('calculate3DModel3DButton', label = 'Calculate 3D model for printing'), downloadButton('export3DModel3DButton',label='Export model for 3D printing') ), mainPanel( uiOutput('panel3DUI') ,rglwidgetOutput('thewidget', width = "100%", height = 600) ) )), tabPanel('Reports', sidebarLayout( sidebarPanel( uiOutput('imageNameReports'), tags$hr(), downloadButton('generateCheckReport', label = 'Download check report'), tags$hr(), downloadButton('generateReport', label = 'Download full report') ), mainPanel( tableOutput('alreadyCalculatedPlot') ) )), tabPanel('About', mainPanel( tags$iframe( seamless="seamless", src="http://www.afmist.org/index_afmapp.html",style="width: 400px; height: 400px") ) ), tags$head(tags$script(src="google-analytics.js")), useShinyjs() ))
# ---- file: AFM/inst/shiny/AFM-desktop/ui.R ----
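# Illustrative sketch (not from the package): the UI above pairs
# rglwidgetOutput() with the server-side renderRglwidget() used in the
# HEADLESS branch of server.R. A minimal pairing, assuming shiny and rgl:
library(shiny)
library(rgl)

ui <- fluidPage(rglwidgetOutput("scene", width = "100%", height = 400))

server <- function(input, output) {
  output$scene <- renderRglwidget({
    open3d(useNULL = TRUE)  # build the scene off-screen
    surface3d(1:10, 1:10, outer(1:10, 1:10, "+"), color = "steelblue")
    rglwidget()             # ship the current scene to the browser
  })
}

# shinyApp(ui, server)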
#' @title Hodrick-Prescott filter for time series data
#' @description
#' The Hodrick-Prescott filter is a data-smoothing technique that separates the trend component from the cyclical component of a time series.
#' @param x time-series vector
#' @param type character, indicating the filter type: "lambda" (the smoothing parameter is supplied directly, or inferred from the series frequency) or "frequency" (the smoothing parameter is derived from a cut-off frequency)
#' @param freq integer; the smoothing parameter when \code{type = "lambda"}, or the cut-off frequency when \code{type = "frequency"}
#' @param drift logical; if TRUE, a linear drift is removed from the series before filtering
#' @import stats
#' @examples
#' data(macroKZ)
#' HP(macroKZ[,2])
#' @rdname HP
#' @export
HP <- function(x, freq = NULL, type = c("lambda", "frequency"), drift = FALSE) {
  if (is.null(drift)) drift <- FALSE
  xname <- deparse(substitute(x))
  type <- match.arg(type)
  if (is.null(type)) type <- "lambda"
  if (is.ts(x)) {
    tsp.x <- tsp(x)
    frq.x <- frequency(x)
    if (type == "lambda") {
      if (is.null(freq)) {
        # default smoothing parameters by data frequency
        if (frq.x == 1)  lambda <- 6
        if (frq.x == 4)  lambda <- 1600
        if (frq.x == 12) lambda <- 129600
      } else lambda <- freq
    }
  } else {
    if (type == "lambda") {
      if (is.null(freq)) stop("freq is NULL") else lambda <- freq
    }
  }
  if (type == "frequency") {
    if (is.null(freq)) stop("freq is NULL") else lambda <- (2 * sin(pi / freq))^-4
  }
  # remove a linear time trend (drift) by OLS before filtering
  undrift <- function(x) {
    n <- nrow(x)
    X <- cbind(rep(1, n), 1:n)
    b <- solve(t(X) %*% X) %*% t(X) %*% x
    x - X %*% b
  }
  xo <- x
  x <- as.matrix(x)
  if (drift) x <- undrift(x)
  n <- length(x)
  imat <- diag(n)
  # second-difference penalty matrix of the HP objective
  Ln <- rbind(matrix(0, 1, n), diag(1, n - 1, n))
  Ln <- (imat - Ln) %*% (imat - Ln)
  Q <- t(Ln[3:n, ])
  SIGMA.R <- t(Q) %*% Q
  SIGMA.n <- diag(n - 2)
  g <- t(Q) %*% as.matrix(x)
  b <- solve(SIGMA.n + lambda * SIGMA.R, g)
  x.cycle <- c(lambda * Q %*% b)
  x.trend <- x - x.cycle
  if (is.ts(xo)) {
    tsp.x <- tsp(xo)
    x.cycle <- ts(x.cycle, start = tsp.x[1], frequency = tsp.x[3])
    x.trend <- ts(x.trend, start = tsp.x[1], frequency = tsp.x[3])
    x <- ts(x, start = tsp.x[1], frequency = tsp.x[3])
  }
  A <- lambda * Q %*% solve(SIGMA.n + lambda * SIGMA.R) %*% t(Q)
  res <- list(cycle = x.cycle, trend = x.trend, fmatrix = A,
              title = "Hodrick-Prescott Filter", xname = xname,
              call = as.call(match.call()), type = type, lambda = lambda,
              method = "hpfilter", x = x)
  return(structure(res, class = "mFilter"))
}
# ---- file: AFR/R/HP.R ----
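# Illustrative usage (a sketch, assuming macroKZ is the quarterly ts
# object shipped with the package): filter real GDP with the standard
# quarterly smoothing parameter and verify that trend + cycle gives back
# the original series.
data(macroKZ)
hp <- HP(macroKZ[, "real_gdp"], type = "lambda", freq = 1600)
all.equal(as.numeric(hp$trend + hp$cycle), as.numeric(hp$x))  # TRUE by construction
plot(hp$x)
lines(hp$trend, col = "red")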
#' @title Breusch-Godfrey test [BG test]
#' @description
#' The BG test is used to test for autocorrelation in the errors of a regression model.
#' @param model a (generalized) linear regression model
#' @param order integer; maximal order of serial correlation to be tested
#' @param order.by either a vector \code{z} or a formula with a single explanatory variable like \code{~ z}
#' @param type the type of test statistic to be returned: "Chisq" or "F"
#' @param data an optional data frame containing the variables in the model
#' @param fill starting values for the lagged residuals in the auxiliary regression. By default 0, but can also be set to NA.
#' @references Mitchell, D. and Zeileis, A. Published 2021-11-07. lmtest package
#' @import stats
#' @importFrom cli console_width
#' @examples
#' model <- lm(real_gdp ~ imp + exp + poil + eurkzt + tonia_rate, data = macroKZ)
#' bg(model)
#' @export
bg <- function(model, order = 1, order.by = NULL, type = c("Chisq", "F"),
               data = list(), fill = 0) {
  dname <- paste(deparse(substitute(model)))
  if (!inherits(model, "formula")) {
    X <- if (is.matrix(model$x)) model$x else model.matrix(terms(model), model.frame(model))
    y <- if (is.vector(model$y)) model$y else model.response(model.frame(model))
  } else {
    mf <- model.frame(model, data = data)
    y <- model.response(mf)
    X <- model.matrix(model, data = data)
  }
  if (!is.null(order.by)) {
    if (inherits(order.by, "formula")) {
      z <- model.matrix(order.by, data = data)
      z <- as.vector(z[, ncol(z)])
    } else {
      z <- order.by
    }
    X <- as.matrix(X[order(z), ])
    y <- y[order(z)]
  }
  n <- nrow(X)
  k <- ncol(X)
  order <- 1:order
  m <- length(order)
  resi <- lm.fit(X, y)$residuals
  # lagged residuals for the auxiliary regression
  Z <- sapply(order, function(x) c(rep(fill, length.out = x), resi[1:(n - x)]))
  if (any(na <- !complete.cases(Z))) {
    X <- X[!na, , drop = FALSE]
    Z <- Z[!na, , drop = FALSE]
    y <- y[!na]
    resi <- resi[!na]
    n <- nrow(X)
  }
  auxfit <- lm.fit(cbind(X, Z), resi)
  cf <- auxfit$coefficients
  vc <- chol2inv(auxfit$qr$qr) * sum(auxfit$residuals^2) / auxfit$df.residual
  names(cf) <- colnames(vc) <- rownames(vc) <- c(colnames(X), paste("lag(resid)", order, sep = "_"))
  switch(match.arg(type),
    Chisq = {
      bg <- n * sum(auxfit$fitted.values^2) / sum(resi^2)
      p.val <- pchisq(bg, m, lower.tail = FALSE)
      df <- m
      names(df) <- "df"
    },
    F = {
      uresi <- auxfit$residuals
      bg <- ((sum(resi^2) - sum(uresi^2)) / m) / (sum(uresi^2) / (n - k - m))
      df <- c(m, n - k - m)
      names(df) <- c("df1", "df2")
      p.val <- pf(bg, df1 = df[1], df2 = df[2], lower.tail = FALSE)
    })
  # print the result (fl(), fsp(), fg() are internal formatting helpers
  # defined elsewhere in the package)
  a <- c("LM test", "p-value")
  b <- c(round(bg, 3), round(p.val, 3))
  w1 <- max(nchar(a))
  w2 <- max(nchar(b))
  w3 <- console_width()
  w <- sum(w1, w2, 7)
  n <- length(b)
  cat(format(as.character(paste("Breusch-Godfrey test for serial correlation of order up to", max(order))),
             width = w3, justify = "centre"), "\n\n")
  if (p.val >= 0.05) cat(paste("Residuals are not autocorrelated.", "\n"))
  else cat(paste("Residuals are autocorrelated.", "\n"))
  cat(rep("-", w), sep = "", "\n")
  for (i in seq(n)) {
    cat(fl(a[i], w1), fsp(), fsp(), fg(b[i], w2), "\n")
  }
  cat(rep("-", w), sep = "", "\n")
}
# ---- file: AFR/R/bg.R ----
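# Illustrative usage: with quarterly data it is natural to test up to
# order 4 (one year of lags); type = "F" switches to the F form of the
# statistic.
data(macroKZ)
model <- lm(real_gdp ~ imp + exp + poil + eurkzt + tonia_rate, data = macroKZ)
bg(model, order = 4)
bg(model, order = 4, type = "F")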
#' @title Breusch-Pagan test
#' @description
#' The Breusch-Pagan test is used to test the residuals of a regression model for heteroskedasticity.
#' @param model a (generalized) linear regression model
#' @param varformula a formula describing only the potential explanatory variables for the variance (no dependent variable needed). By default the same explanatory variables are taken as in the main regression model.
#' @param studentize logical. If set to TRUE, Koenker's studentized version of the test statistic is used.
#' @param data an optional data frame containing the variables in the model
#' @import stats
#' @importFrom cli console_width
#' @examples
#' model <- lm(real_gdp ~ imp + exp + poil + eurkzt + tonia_rate, data = macroKZ)
#' bp(model)
#' @references Torsten, H., Zeileis, A., Farebrother, Richard W., Cummins, C., Millo, G., Mitchell, D., lmtest package
#' Wang, B., 2014, bstats package
#' @export
bp <- function(model, varformula = NULL, studentize = TRUE, data = list()) {
  dname <- paste(deparse(substitute(model)))
  if (!inherits(model, "formula")) {
    X <- if (is.matrix(model$x)) model$x else model.matrix(terms(model), model.frame(model))
    y <- if (is.vector(model$y)) model$y else model.response(model.frame(model))
    Z <- if (is.null(varformula)) X else model.matrix(varformula, data = data)
  } else {
    mf <- model.frame(model, data = data)
    y <- model.response(mf)
    X <- model.matrix(model, data = data)
    Z <- if (is.null(varformula)) X else model.matrix(varformula, data = data)
  }
  if (!(all(c(row.names(X) %in% row.names(Z), row.names(Z) %in% row.names(X))))) {
    allnames <- row.names(X)[row.names(X) %in% row.names(Z)]
    X <- X[allnames, ]
    Z <- Z[allnames, ]
    y <- y[allnames]
  }
  if (ncol(Z) < 2)
    stop("the auxiliary variance regression requires at least an intercept and a regressor")
  k <- ncol(X)
  n <- nrow(X)
  resi <- lm.fit(X, y)$residuals
  sigma2 <- sum(resi^2) / n
  if (studentize) {
    w <- resi^2 - sigma2
    aux <- lm.fit(Z, w)
    bp <- n * sum(aux$fitted.values^2) / sum(w^2)
    method <- "studentized Breusch-Pagan test"
  } else {
    f <- resi^2 / sigma2 - 1
    aux <- lm.fit(Z, f)
    bp <- 0.5 * sum(aux$fitted.values^2)
    method <- "Breusch-Pagan test"
  }
  df <- c(df = aux$rank - 1)
  PVAL <- pchisq(bp, df, lower.tail = FALSE)
  # print the result (fl(), fsp(), fg() are internal formatting helpers)
  a <- c("BP", "p-value")
  b <- c(round(bp, 3), round(PVAL, 3))
  w1 <- max(nchar(a))
  w2 <- max(nchar(b))
  w3 <- console_width()
  w <- sum(w1, w2, 7)
  n <- length(b)
  cat(format(as.character("Breusch-Pagan test"), width = w3, justify = "centre"), "\n")
  if (PVAL >= 0.05)
    cat(paste("Homoskedasticity is present.",
              "Please use other tests additionally.",
              "In case of opposite results study the case further.", sep = "\n", "\n"))
  else
    cat(paste("Heteroskedasticity is present.",
              "Please use other tests additionally.",
              "In case of opposite results study the case further.", sep = "\n", "\n"))
  cat(rep("-", w), sep = "", "\n")
  for (i in seq(n)) {
    cat(fl(a[i], w1), fsp(), fsp(), fg(b[i], w2), "\n")
  }
  cat(rep("-", w), sep = "", "\n")
}
# ---- file: AFR/R/bp.R ----
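# Illustrative usage: the default is Koenker's studentized statistic,
# which is robust to non-normal errors; studentize = FALSE gives the
# original Breusch-Pagan form.
data(macroKZ)
model <- lm(real_gdp ~ imp + exp + poil + eurkzt + tonia_rate, data = macroKZ)
bp(model)
bp(model, studentize = FALSE)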
#' All possible regression variable coefficients.
#'
#' Returns the coefficients for each variable from each model.
#'
#' @param object An object of class \code{lm}.
#' @param ... Other arguments.
#'
#' @return \code{check_betas} returns a \code{data.frame} containing:
#'
#' \item{x}{model}
#'
#' @examples
#' model <- lm(real_gdp ~ imp + exp + usdkzt + eurkzt, data = macroKZ)
#' check_betas(model)
#' @references Hebbali, Aravind. Published 2020-02-10. olsrr package
#' @import stats
#' @importFrom nlme splitFormula
#' @export
#'
check_betas <- function(object, ...) {

  if (!all(class(object) == "lm")) {
    stop("Please specify a OLS linear regression model.", call. = FALSE)
  }

  if (length(object$coefficients) < 3) {
    stop("Please specify a model with at least 2 predictors.", call. = FALSE)
  }

  betas  <- NULL
  rsq    <- NULL
  lpreds <- NULL

  metrics    <- allpos_helper(object)
  beta_names <- names(metrics$betas)
  mindex     <- seq_len(length(metrics$rsq))
  reps       <- metrics$lpreds + 1
  m_index    <- rep(mindex, reps)
  beta       <- metrics$betas

  # extract the predictor names from the model formula
  s <- splitFormula(formula(object), sep = "+")
  s <- gsub("~", replacement = "", x = s, ignore.case = TRUE)
  n <- length(s)

  # with n predictors there are 2^n - 1 non-empty subsets (candidate models)
  N <- function(n) {
    if (n == 1) return(1)
    else return(2 * N(n - 1) + 1)
  }

  cat(paste("Based on the chosen regression model", N(n), "models can be generated. See below:"), sep = "\n")

  data.frame(model = m_index, predictor = beta_names, beta = beta)
}

#' All possible regression internal
#'
#' Internal function for all possible regression.
#'
#' @param model An object of class \code{lm}.
#' @importFrom utils combn
#' @noRd
allpos_helper <- function(model) {

  nam <- coeff_names(model)
  n   <- length(nam)
  r   <- seq_len(n)

  combs <- list()
  for (i in seq_len(n)) {
    combs[[i]] <- combn(n, r[i])
  }

  predicts  <- nam
  lc        <- length(combs)
  varnames  <- model_colnames(model)
  len_preds <- length(predicts)
  gap       <- len_preds - 1
  data      <- mod_sel_data(model)
  space     <- coeff_length(predicts, gap)
  colas     <- unname(unlist(lapply(combs, ncol)))
  response  <- varnames[1]
  p         <- colas
  t         <- cumsum(colas)
  q         <- c(1, t[-lc] + 1)
  mcount    <- 0

  rsq <- list(); adjrsq <- list(); predrsq <- list(); cp <- list()
  aic <- list(); sbic <- list(); sbc <- list(); msep <- list()
  fpe <- list(); apc <- list(); hsp <- list(); preds <- list()
  lpreds <- c()
  betas  <- c()

  # fit every subset model and collect fit statistics (olsrr helpers)
  for (i in seq_len(lc)) {
    for (j in seq_len(colas[i])) {
      predictors <- nam[combs[[i]][, j]]
      lp <- length(predictors)
      out <- ols_regress(paste(response, "~", paste(predictors, collapse = " + ")), data = data)
      mcount <- mcount + 1
      lpreds[mcount]    <- lp
      rsq[[mcount]]     <- out$rsq
      adjrsq[[mcount]]  <- out$adjr
      predrsq[[mcount]] <- ols_pred_rsq(out$model)
      cp[[mcount]]      <- ols_mallows_cp(out$model, model)
      aic[[mcount]]     <- ols_aic(out$model)
      sbic[[mcount]]    <- ols_sbic(out$model, model)
      sbc[[mcount]]     <- ols_sbc(out$model)
      msep[[mcount]]    <- ols_msep(out$model)
      fpe[[mcount]]     <- ols_fpe(out$model)
      apc[[mcount]]     <- ols_apc(out$model)
      hsp[[mcount]]     <- ols_hsp(out$model)
      preds[[mcount]]   <- paste(predictors, collapse = " ")
      betas <- append(betas, out$betas)
    }
  }

  result <- list(lpreds = lpreds, rsq = rsq, adjrsq = adjrsq, predrsq = predrsq,
                 cp = cp, aic = aic, sbic = sbic, sbc = sbc, msep = msep,
                 fpe = fpe, apc = apc, hsp = hsp, preds = preds,
                 lc = lc, q = q, t = t, betas = betas)

  return(result)
}

#' Coefficient names
#'
#' Returns the names of the coefficients including interaction variables.
#'
#' @param model An object of class \code{lm}.
#' @noRd
#'
coeff_names <- function(model) {
  terms <- NULL
  colnames(attr(model$terms, which = "factor"))
}

#' Model data columns
#'
#' Returns the names of the columns in the data used in the model.
#'
#' @param model An object of class \code{lm}.
#' @noRd
#'
model_colnames <- function(model) {
  names(model.frame(model))
}

#' Coefficients length
#'
#' Returns the length of the coefficient names.
#'
#' @param predicts Name of the predictors in the model.
#' @param gap A numeric vector.
#' @noRd
#'
coeff_length <- function(predicts, gap) {
  sum(nchar(predicts)) + gap
}

mod_sel_data <- function(model) {
  eval(model$call$data)
}

l <- function(x) {
  x <- as.character(x)
  k <- grep("\\$", x)
  if (length(k) == 1) {
    temp <- strsplit(x, "\\$")
    out  <- temp[[1]][2]
  } else {
    out <- x
  }
  return(out)
}
# ---- file: AFR/R/check_betas.R ----
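# Illustrative usage: with 4 predictors, check_betas() enumerates all
# 2^4 - 1 = 15 candidate subset models and returns one row per estimated
# coefficient, which makes it easy to see how stable each beta is across
# specifications.
data(macroKZ)
model <- lm(real_gdp ~ imp + exp + usdkzt + eurkzt, data = macroKZ)
betas <- check_betas(model)
head(betas)
subset(betas, predictor == "imp")  # the 'imp' coefficient across all subsets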
#' @title Preliminary data check for errors
#' @description
#' Preliminary check of a data frame for missing values, non-numeric format, and outliers.
#'
#' Missing items: the number of missing values in each column of the dataset.
#' Numeric format: whether each column of the dataset is numeric.
#' Outliers: the number of outliers (values more than three standard deviations from the mean) in each numeric column of the dataset.
#' @usage checkdata(x)
#' @param x is a data frame
#' @import stats
#' @importFrom utils data
#' @importFrom cli console_width
#' @examples
#' data(macroKZ)
#' checkdata(macroKZ)
#' @export
checkdata <- function(x) {

  # missing values per column
  miss <- sapply(x, function(x) sum(is.na(x)))
  missing_total <- 0
  for (i in 1:length(miss)) {
    missing_total <- missing_total + miss[[i]]
  }

  # numeric format per column (TRUE if the column is numeric)
  number <- sapply(x, is.numeric)
  notnum_total <- sum(!number)

  # outliers per column: values outside mean +/- 3 standard deviations;
  # non-numeric columns are reported as NA, missing values are skipped
  out <- function(v) {
    m <- mean(v, na.rm = TRUE)
    sdt <- 3 * sd(v, na.rm = TRUE)
    if (is.na(sdt)) return(NA_integer_)
    sum(v > m + sdt | v < m - sdt, na.rm = TRUE)
  }
  outlier <- sapply(x, function(col) if (is.numeric(col)) out(col) else NA_integer_)
  outlier_total <- sum(outlier, na.rm = TRUE)

  # print (fc() is an internal formatting helper defined elsewhere)
  w <- console_width()
  cat(paste("There are", missing_total, "missing items in the dataset."),
      paste("There are", notnum_total, "columns in non-numeric format in the dataset."),
      paste("There are", outlier_total, "outliers in the dataset."), sep = "\n")

  # missing items
  cat(rep("-", w), sep = "", "\n")
  cat(fc("Missing items", w), "\n")
  cat(rep("-", w), sep = "", "\n")
  print(list(miss))
  cat(rep("-", w), sep = "", "\n\n")

  # numeric format
  cat(rep("-", w), sep = "", "\n")
  cat(fc("Numeric format", w), "\n")
  cat(rep("-", w), sep = "", "\n")
  print(list(number))
  cat(rep("-", w), sep = "", "\n\n")

  # outliers
  cat(rep("-", w), sep = "", "\n")
  cat(fc("Outliers", w), "\n")
  cat(rep("-", w), sep = "", "\n")
  print(list(outlier))
  cat(rep("-", w), sep = "", "\n\n")
}
# ---- file: AFR/R/checkdata.R ----
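# Illustrative usage on a small toy frame with one known problem per
# column, so the three checks can be read off directly:
toy <- data.frame(
  a = c(rep(1, 49), NA),    # one missing value
  b = rep("text", 50),      # one non-numeric column
  c = c(rep(1, 49), 50)     # one value far beyond mean + 3 sd
)
checkdata(toy)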
#' @title Multicollinearity test
#' @description
#' Multicollinearity is the occurrence of high intercorrelations among two or more independent variables in a multiple regression.
#' @param x is a numeric vector or matrix
#' @param thrs threshold above which the absolute correlation is flagged
#' @param num logical; if TRUE the rounded absolute correlation matrix is printed, otherwise a logical matrix flagging pairs with absolute correlation at or above the threshold
#' @import stats
#' @examples
#' data(macroKZ)
#' corsel(macroKZ, num = FALSE, thrs = 0.65)
#' @rdname corsel
#' @export
# note: the input must be numeric and free of NAs
# (e.g. d <- as.ts(macroKZ); d1 <- d[, -1] to drop a period column first)
corsel <- function(x, thrs, num) {
  if (any(thrs > 1 | thrs < 0))
    stop("`thrs` should be on [0,1]", call. = FALSE)
  # flag pairs whose absolute correlation reaches the threshold
  c_Rank <- ifelse(abs(cor(x)) >= thrs, TRUE, FALSE)
  c_Rank <- as.data.frame(c_Rank)
  c <- abs(cor(x))
  if (num == FALSE) print(c_Rank)
  else print(round(c, digits = 3))
}
# ---- file: AFR/R/corsel.R ----
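# Illustrative usage: num = TRUE prints the rounded absolute correlation
# matrix, num = FALSE a logical matrix flagging pairs whose absolute
# correlation reaches the threshold. cor() propagates NAs, so the series
# are cleaned first.
data(macroKZ)
clean <- na.omit(macroKZ)
corsel(clean, thrs = 0.65, num = TRUE)
corsel(clean, thrs = 0.65, num = FALSE)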
#' @title Decomposition plot
#' @description
#' The function depicts the decomposition of regressors as a stacked barplot.
#' @param model An object of class \code{lm}.
#' @param dataset The dataset on which the model was built (a quarterly ts object)
#' @param print_plot logical; if TRUE the plot is printed, otherwise the ggplot object is returned
#' @examples
#' data(macroKZ)
#' model <- lm(real_gdp ~ usdkzt + eurkzt + imp + exp, data = macroKZ)
#' dec_plot(model, macroKZ)
#' @references Hebbali, Aravind. Published 2020-02-10. olsrr package
#' @author The Agency of the Republic of Kazakhstan for Regulation and Development of Financial Market (AFR)
#' @import ggplot2
#' @importFrom nlme splitFormula
#' @importFrom zoo as.yearqtr
#' @export
#'
dec_plot <- function(model, dataset, print_plot = TRUE) {

  # collect the regressor columns named in the model formula;
  # the leading column of ones carries the intercept
  m <- matrix(1)
  s <- splitFormula(formula(model), sep = "+")
  s <- gsub("~", replacement = "", x = s, ignore.case = TRUE)
  for (i in 1:length(s)) {
    macro_name <- s[i]
    one_macro <- dataset[, macro_name]
    m <- cbind(m, one_macro)
  }

  # contribution of each regressor = coefficient * observed value;
  # drop the intercept column
  tmp <- data.frame(t(coef(model) * t(m)))
  tmp <- tmp[, -1]
  tmp <- na.omit(tmp)
  names(tmp) <- s

  # reshape to long format for ggplot
  new <- data.frame()
  for (c in 1:ncol(tmp)) {
    t <- tmp[[c]]
    df <- data.frame(t)
    variable <- colnames(tmp)[[c]]
    df$variable <- variable
    df$date <- rownames(tmp)
    new <- rbind(new, df)
  }
  new$date <- as.yearqtr(time(dataset))

  d <- aggregate(new$t, by = list(date = new$date), sum)
  names(d) <- c("date", "value")

  p <- ggplot(new, aes(fill = variable, y = t, x = date)) +
    geom_bar(position = "stack", stat = "identity") +
    scale_fill_brewer(palette = "Paired") +
    guides(x = guide_axis(angle = 90)) +
    # geom_line(data = d, aes(x = date, y = value, group = 1)) +
    xlab("period") + ylab("Value") + ggtitle("Decomposition plot") +
    theme(plot.title = element_text(face = "bold", size = 18, hjust = 0.5)) +
    theme(axis.title = element_text(face = "bold"))

  if (print_plot) {
    print(p)
  } else {
    return(p)
  }
}
# ---- file: AFR/R/dec_plot.R ----
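# Illustrative sketch of what each stacked bar encodes: a regressor's
# contribution in a period is its coefficient times its observed value,
# so the per-period contributions plus the intercept sum to the fitted
# values (base R only):
data(macroKZ)
model <- lm(real_gdp ~ usdkzt + eurkzt + imp + exp, data = macroKZ)
contrib <- sweep(model.matrix(model)[, -1], 2, coef(model)[-1], `*`)
head(rowSums(contrib) + coef(model)[1])  # identical to head(fitted(model))
head(fitted(model))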
#' @title Transforming time-series data to stationary
#' @description
#' Percent change is a change between two consecutive terms, %
#' @usage pct1(x)
#' @import stats
#' @param x time-series vector(s)
#' @examples
#' data(macroKZ)
#' new <- pct1(macroKZ)
#' @rdname pct1
#' @export
# note: stats::lag() shifts the time base back, so lag(x)/x compares each
# observation with the following one (the same convention as pct4 below)
pct1 <- function(x) ((stats::lag(x) / x - 1) * 100)

#' @title Transforming time-series data to stationary
#' @description
#' Percent change is a change between a term and its lagged value for the prior period, %
#' @usage pct4(x)
#' @import stats
#' @param x time-series vector(s)
#' @examples
#' data(macroKZ)
#' new <- pct4(macroKZ)
#' @rdname pct4
#' @export
pct4 <- function(x) {
  x4 <- stats::lag(x, 4)
  p <- (x4 / x - 1) * 100
  return(p)
}

#' @title Transforming time-series data to stationary
#' @description
#' Difference of logarithms is finding the difference between two consecutive logarithm values of a time series
#' @param x time-series vector
#' @param difference difference between x items
#' @param lag lagged period
#' @import stats
#' @importFrom rlang abort
#' @importFrom xts diff.xts
#' @examples
#' data(macroKZ)
#' new <- difflog(macroKZ)
#' @rdname difflog
#' @export
difflog <- function(x, lag = 1, difference = 1) {
  if (!is.numeric(x))
    rlang::abort("Non-numeric data detected. 'x' must be numeric.")
  x <- log(x)
  ret_vec <- xts::diff.xts(x = x, lag = lag, differences = difference,
                           arithmetic = TRUE, na.pad = TRUE)
  # pad the front with NAs so the result keeps the input length
  pad_len <- length(x) - length(ret_vec)
  if (pad_len > 0) {
    ret_vec <- c(rep(NA, pad_len), ret_vec)
  }
  return(ret_vec)
}
# ---- file: AFR/R/diff.R ----
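# Illustrative usage: the three stationarity transformations applied to
# the same series.
data(macroKZ)
gdp <- macroKZ[, "real_gdp"]
pct1(gdp)     # quarter-on-quarter growth, %
pct4(gdp)     # growth versus four quarters earlier, %
difflog(gdp)  # first difference of logs, approximately the growth rate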
#' @title finratKZ dataset
#' @format Dataset of 400 corporate borrowers, i.e. 200 standard (IFRS stage 1) and 200 default ones, characterized by 29 financial ratios.
#' \describe{
#' \item{Default}{Dummy variable where 0 - standard (IFRS stage 1) borrower, 1 - default borrower}
#' \item{Rev_gr}{Revenue growth rate}
#' \item{EBITDA_gr}{EBITDA growth rate}
#' \item{Cap_gr}{Capital growth rate}
#' \item{CR}{Current ratio}
#' \item{QR}{Quick ratio}
#' \item{Cash_ratio}{Cash ratio}
#' \item{WC_cycle}{Working capital cycle}
#' \item{DTA}{Debt-to-assets}
#' \item{DTE}{Debt-to-equity}
#' \item{LR}{Leverage ratio (Total assets/Total equity)}
#' \item{EBITDA_debt}{EBITDA-to-debt}
#' \item{IC}{Interest coverage (Income statement)}
#' \item{CTI}{Cash-to-income}
#' \item{IC_CF}{Interest coverage (Cash flow statement)}
#' \item{DCR}{Debt coverage ratio (Cash flow from operations/Total debt)}
#' \item{CFR}{Cash flow to revenue}
#' \item{CRA}{Cash return on assets (Cash flow from operations/Total assets)}
#' \item{CRE}{Cash return on equity (Cash flow from operations/Total equity)}
#' \item{ROA}{Return on assets}
#' \item{ROE}{Return on equity}
#' \item{NPM}{Net profit margin}
#' \item{GPM}{Gross profit margin}
#' \item{OPM}{Operating profit margin}
#' \item{RecT}{Receivables turnover}
#' \item{InvT}{Inventory turnover}
#' \item{PayT}{Payables turnover}
#' \item{TA}{Total assets turnover}
#' \item{FA}{Fixed assets turnover}
#' \item{WC}{Working capital turnover}
#' }
#' @references The Agency of the Republic of Kazakhstan for Regulation and Development of Financial Market
#' @export
"finratKZ"
# ---- file: AFR/R/finratKZ.R ----
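# Illustrative usage (a sketch, not a validated scoring model): the
# balanced 200/200 default-standard design makes the dataset convenient
# for a quick logistic regression on a few ratios.
data(finratKZ)
fit <- glm(Default ~ ROA + DTE + CR, family = binomial, data = finratKZ)
summary(fit)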
#' @title Goldfeld-Quandt test
#' @description
#' The Goldfeld-Quandt test is used to test the residuals of a regression model for heteroskedasticity.
#' @param model a (generalized) linear regression model
#' @param point numerical. If point is smaller than 1, it is interpreted as a percentage of the data.
#' @param fraction numerical. The number (or, if smaller than 1, the fraction) of central observations to be omitted.
#' @param alternative a character string specifying the alternative hypothesis
#' @param order.by either a vector \code{z} or a formula with a single explanatory variable like \code{~ z}
#' @param data an optional data frame containing the variables in the model
#' @importFrom lmtest gqtest
#' @importFrom cli console_width
#' @examples
#' model <- lm(real_gdp ~ imp + exp + poil + eurkzt + tonia_rate, data = macroKZ)
#' gq(model)
#' @references Torsten, H., Zeileis, A., Farebrother, Richard W., Cummins, C., Millo, G., Mitchell, D., lmtest package
#' Wang, B., 2014, bstats package
#' @export
gq <- function(model, point = 0.5, fraction = 0,
               alternative = c("greater", "two.sided", "less"),
               order.by = NULL, data = list()) {
  dname <- paste(deparse(substitute(model)))
  alternative <- match.arg(alternative)
  if (!inherits(model, "formula")) {
    X <- if (is.matrix(model$x)) model$x else model.matrix(terms(model), model.frame(model))
    y <- if (is.vector(model$y)) model$y else model.response(model.frame(model))
  } else {
    mf <- model.frame(model, data = data)
    y <- model.response(mf)
    X <- model.matrix(model, data = data)
  }
  k <- ncol(X)
  n <- nrow(X)
  # locate the split point and the central observations to omit
  if (point > 1) {
    if (fraction < 1) fraction <- floor(fraction * n)
    point1 <- point - ceiling(fraction / 2)
    point2 <- point + ceiling(fraction / 2 + 0.01)
  } else {
    if (fraction >= 1) fraction <- fraction / n
    point1 <- floor((point - fraction / 2) * n)
    point2 <- ceiling((point + fraction / 2) * n + 0.01)
  }
  if (point2 > n - k + 1 | point1 < k)
    stop("inadmissible breakpoint/too many central observations omitted")
  if (!is.null(order.by)) {
    if (inherits(order.by, "formula")) {
      z <- model.matrix(order.by, data = data)
      z <- as.vector(z[, ncol(z)])
    } else {
      z <- order.by
    }
    X <- as.matrix(X[order(z), ])
    y <- y[order(z)]
  }
  # residual sums of squares of the two subsamples
  rss1 <- sum(lm.fit(as.matrix(X[1:point1, ]), y[1:point1])$residuals^2)
  rss2 <- sum(lm.fit(as.matrix(X[point2:n, ]), y[point2:n])$residuals^2)
  mss <- c(rss1 / (point1 - k), rss2 / (n - point2 + 1 - k))
  gq <- mss[2] / mss[1]
  df <- c(n - point2 + 1 - k, point1 - k)
  PVAL <- switch(alternative,
                 two.sided = (2 * min(pf(gq, df[1], df[2]),
                                      pf(gq, df[1], df[2], lower.tail = FALSE))),
                 less = pf(gq, df[1], df[2]),
                 greater = pf(gq, df[1], df[2], lower.tail = FALSE))
  # print the result (fl(), fsp(), fg() are internal formatting helpers)
  a <- c("GQ", "p-value", "df1", "df2")
  b <- c(round(gq, 3), round(PVAL, 3), df[1], df[2])
  w1 <- max(nchar(a))
  w2 <- max(nchar(b))
  w3 <- console_width()
  w <- sum(w1, w2, 7)
  n <- length(b)
  cat(format(as.character("Goldfeld-Quandt test"), width = w3, justify = "centre"), "\n")
  if (PVAL >= 0.05)
    cat(paste("Homoskedasticity is present.",
              "Please use other tests additionally.",
              "In case of opposite results study the case further.", sep = "\n", "\n"))
  else
    cat(paste("Heteroskedasticity is present.",
              "Please use other tests additionally.",
              "In case of opposite results study the case further.", sep = "\n", "\n"))
  cat(rep("-", w), sep = "", "\n")
  for (i in seq(n)) {
    cat(fl(a[i], w1), fsp(), fsp(), fg(b[i], w2), "\n")
  }
  cat(rep("-", w), sep = "", "\n")
}
/scratch/gouwar.j/cran-all/cranData/AFR/R/gq.R
#' @title macroKZ dataset
#' @format A time series data frame of 54 quarterly observations of 50 macroeconomic and 10 financial parameters for the 2010-2023 period.
#' \describe{
#' \item{real_gdp}{Real GDP}
#' \item{GDD_Agr_R}{Real gross value added Agriculture}
#' \item{GDD_Min_R}{Real gross value added Mining}
#' \item{GDD_Man_R}{Real gross value added Manufacture}
#' \item{GDD_Elc_R}{Real gross value added Electricity}
#' \item{GDD_Con_R}{Real gross value added Construction}
#' \item{GDD_Trd_R}{Real gross value added Trade}
#' \item{GDD_Trn_R}{Real gross value added Transportation}
#' \item{GDD_Inf_R}{Real gross value added Information}
#' \item{GDD_R}{Real gross value added}
#' \item{GDP_DEF}{GDP deflator}
#' \item{Rincpop_q}{Real population average monthly income}
#' \item{Rexppop_q}{Real population average monthly expenses}
#' \item{Rwage_q}{Real population average monthly wage}
#' \item{imp}{Import}
#' \item{exp}{Export}
#' \item{cpi}{Inflation}
#' \item{realest_resed_prim}{Real price for estate in primary market}
#' \item{realest_resed_sec}{Real price for estate in secondary market}
#' \item{realest_comm}{Real price for commercial estate}
#' \item{index_stock_weighted}{Change in stock value for traded companies}
#' \item{ntrade_Agr}{Change in stock value for non-traded companies Agriculture}
#' \item{ntrade_Min}{Change in stock value for non-traded companies Mining}
#' \item{ntrade_Man}{Change in stock value for non-traded companies Manufacture}
#' \item{ntrade_Elc}{Change in stock value for non-traded companies Electricity}
#' \item{ntrade_Con}{Change in stock value for non-traded companies Construction}
#' \item{ntrade_Trd}{Change in stock value for non-traded companies Trade}
#' \item{ntrade_Trn}{Change in stock value for non-traded companies Transportation}
#' \item{ntrade_Inf}{Change in stock value for non-traded companies Information}
#' \item{fed_fund_rate}{Federal Funds Rate}
#' \item{govsec_rate_kzt_3m}{Return on government securities in KZT, 3 months}
#' \item{govsec_rate_kzt_1y}{Return on government securities in KZT, 1 year}
#' \item{govsec_rate_kzt_7y}{Return on government securities in KZT, 7 years}
#' \item{govsec_rate_kzt_10y}{Return on government securities in KZT, 10 years}
#' \item{tonia_rate}{TONIA}
#' \item{rate_kzt_mort_0y_1y}{Weighted average mortgage lending rate for new loans, less than a year}
#' \item{rate_kzt_mort_1y_iy}{Weighted average mortgage lending rate for new loans, more than a year}
#' \item{rate_kzt_corp_0y_1y}{Weighted average lending rate for new loans to non-financial organizations in KZT, less than a year}
#' \item{rate_usd_corp_0y_1y}{Weighted average lending rate for new loans to non-financial organizations in USD, less than a year}
#' \item{rate_kzt_corp_1y_iy}{Weighted average lending rate for new loans to non-financial organizations in KZT, more than a year}
#' \item{rate_usd_corp_1y_iy}{Weighted average lending rate for new loans to non-financial organizations in USD, more than a year}
#' \item{rate_kzt_indv_0y_1y}{Weighted average lending rate for consumer loans in KZT, less than a year}
#' \item{rate_kzt_indv_1y_iy}{Weighted average lending rate for consumer loans in KZT, more than a year}
#' \item{usdkzt}{USD/KZT exchange rate}
#' \item{eurkzt}{EUR/KZT exchange rate}
#' \item{rurkzt}{RUB/KZT exchange rate}
#' \item{poil}{Brent oil price}
#' \item{realest_resed_prim_rus}{Real price for estate in primary market in Russia}
#' \item{realest_resed_sec_rus}{Real price for estate in secondary market in Russia}
#' \item{cred_portfolio}{Credit portfolio}
#' \item{coef_liq_k4}{k4 prudential coefficient}
#' \item{coef_k1}{k1 prudential coefficient}
#' \item{coef_k3}{k3 prudential coefficient}
#' \item{provisions}{Provisions}
#' \item{percent_margin}{Percent margin}
#' \item{com_inc}{Commission income}
#' \item{com_exp}{Commission expenses}
#' \item{oper_inc}{Operational income}
#' \item{oth_inc}{Other income}
#' \item{DR}{Default rate}
#'}
#' @source Bureau of National statistics, Agency for Strategic planning and reforms of the Republic of Kazakhstan
#' @references The Agency of the Republic of Kazakhstan for Regulation and Development of Financial Market
#' @export
"macroKZ"
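# Appended inspection sketch (an illustration, not part of the original file);
# it assumes the quarterly series can be indexed by column name.
data(macroKZ)
summary(macroKZ[, "real_gdp"])
plot(macroKZ[, "real_gdp"], type = "l", ylab = "Real GDP")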
/scratch/gouwar.j/cran-all/cranData/AFR/R/macroKZ.R
#' Test for normality
#' Test for detecting violation of the normality assumption.
#'
#' @param model an object of class \code{lm}.
#' @param ... Other arguments.
#'
#' @return \code{ols_test_normality} is a list containing the
#' following components:
#'
#' \item{kolmogorov}{Kolmogorov-Smirnov statistic}
#' \item{shapiro}{Shapiro-Wilk statistic}
#' \item{cramer}{Cramer-von Mises statistic}
#' \item{anderson}{Anderson-Darling statistic}
#'
#' @examples
#' data(macroKZ)
#' model <- lm(real_gdp ~ imp + exp + usdkzt + poil, data = macroKZ)
#' ols_test_normality(model)
#'
#' @import olsrr
#' @importFrom stats ks.test shapiro.test
#' @importFrom goftest cvm.test
#' @importFrom nortest ad.test
#'
#' @export
#'
ols_test_normality <- function(model, ...) {
  if (!inherits(model, "lm")) {
    stop("Please specify an OLS linear regression model.", call. = FALSE)
  }
  residuals <- residuals(model)
  ks  <- ks.test(residuals, "pnorm", mean(residuals), sd(residuals))
  sw  <- shapiro.test(residuals)
  cvm <- cvm.test(residuals)
  ad  <- ad.test(residuals)
  result <- list(kolmogorov = ks, shapiro = sw, cramer = cvm, anderson = ad)
  class(result) <- "ols_test_normality"
  print_norm_test(result)
  invisible(result)
}

print_norm_test <- function(data) {
  # width
  w1 <- 18
  w2 <- 14
  w3 <- 7
  w <- sum(w1, w2, w3, 8)
  # vectors
  tests <- c(
    "Shapiro-Wilk",
    "Kolmogorov-Smirnov",
    "Cramer-von Mises",
    "Anderson-Darling"
  )
  stats <- c(
    data$shapiro$statistic,
    data$kolmogorov$statistic,
    data$cramer$statistic,
    data$anderson$statistic
  )
  pvals <- c(
    data$shapiro$p.value,
    data$kolmogorov$p.value,
    data$cramer$p.value,
    data$anderson$p.value
  )
  n <- length(stats)
  # print
  cat(paste("Hint:", "If p-value > 0.05, data is normally distributed.", sep = "\n", "\n"))
  cat(rep("-", w), sep = "", "\n")
  cat(
    format("Test", width = w1, justify = "centre"), fs(),
    format("Statistic", width = w2, justify = "centre"), fs(),
    format("pvalue", width = 7, justify = "centre"), "\n"
  )
  cat(rep("-", w), sep = "", "\n")
  for (i in seq_len(n)) {
    cat(
      format(tests[i], width = w1), fs(),
      format(as.character(round(stats[i], 4)), width = w2, justify = "centre"), fs(),
      format(round(pvals[i], 4), nsmall = 4, width = 7, justify = "centre"), "\n"
    )
  }
  cat(rep("-", w), sep = "", "\n")
}
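# Appended usage sketch (an illustration, not part of the original file): the
# returned object is a plain list, so individual test results can be reused
# programmatically.
model <- lm(real_gdp ~ imp + exp + usdkzt + poil, data = macroKZ)
res <- ols_test_normality(model)
res$shapiro$p.value        # Shapiro-Wilk p-value
res$kolmogorov$statistic   # Kolmogorov-Smirnov D statistic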
/scratch/gouwar.j/cran-all/cranData/AFR/R/ols_test_normality.R
#' @title Necessary size of the time-series dataset
#' @description
#' Assesses whether the time-series data has enough observations for the chosen model, using a rule of thumb of six observations per regressor
#' @usage opt_size(model)
#' @param model is a linear regression model of class \code{lm}.
#' @examples
#' data(macroKZ)
#' model <- lm(real_gdp ~ imp + exp + poil + eurkzt + tonia_rate, data = macroKZ)
#' opt_size(model)
#' @importFrom nlme splitFormula
#' @export

opt_size <- function(model){
  model$call$formula <- as.formula(model)
  s <- splitFormula(model$call$formula, sep = "+")
  s <- gsub("~", replacement = "", x = s, ignore.case = TRUE)
  # number of regressors
  n <- length(s)
  # number of observations used in the fit
  f <- model$df.residual + 1 + n
  # rule of thumb: six observations per regressor
  k <- as.integer(n * 6)
  if (f >= k) {
    message <- "There is an acceptable number of observations."
  } else {
    message <- "There are not enough observations."
  }
  cat(paste(message),
      paste("It is necessary to have", k, "observations."),
      paste("Your regression has", f, "observations."), sep = "\n")
  warning("If there is an equal or close number of observations, please check further.")
}
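# Worked illustration of the rule of thumb coded above (added comment; the
# figures assume the documented example with 5 regressors): k = 6 * 5 = 30
# observations are deemed necessary, while the macroKZ fit supplies
# f = df.residual + 1 + 5 observations, roughly the 54 available quarters.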
/scratch/gouwar.j/cran-all/cranData/AFR/R/opt_size.R
fl <- function(x, w) { x <- as.character(x) ret <- format(x, width = w, justify = "left") return(ret) } fg <- function(x, w) { z <- as.character(x) y <- format(z, width = w, justify = "right") return(y) } fsp <- function() { x <- rep(" ") return(x) } fc <- function(x,w){ x<- as.character (x) r<-format(x, width = w, justify = "centre") return(r) } fr <- function(x,w1){ x<- as.character (x) r<-format(x, width = w1, justify = "right") return(r) } fs <- function() { x <- rep(" ") return(x) } plot_stepwise <- function (d, title) { a <- NULL b <- NULL ggplot(d, aes(x = a, y = b)) + geom_line(color = "blue") + geom_point(color = "blue", shape = 1, size = 2) + xlab("") + ylab("") + ggtitle(title) + theme(axis.ticks = element_blank()) } print_step_forward <- function(data) { n <- length(data$predictors) if (n < 1) { stop("No variables have been added to the model based on p-values.") } # width w1 <- nchar("Step") w2 <- max(nchar("Variable"), nchar(data$predictors)) w3 <- max(nchar("R-Square"), nchar(format(round(data$rsquare, 4), nsmall = 4))) w4 <- max(nchar("R-Square"), nchar(format(round(data$adjr, 4), nsmall = 4))) w5 <- max(nchar("C(p)"), nchar(format(round(data$mallows_cp, 4), nsmall = 4))) w6 <- max(nchar("AIC"), nchar(format(round(data$aic, 4), nsmall = 4))) w7 <- max(nchar("RMSE"), nchar(format(round(data$rmse, 4), nsmall = 4))) w <- sum(w1, w2, w3, w4, w5, w6, w7, 24) cat("\n") cat(format("Selection Summary", justify = "centre", width = w), "\n") cat(rep("-", w), sep = "", "\n") cat( format("", width = w1), fs(), format("Variable", width = w2), fs(), format("", width = w3), fs(), format("Adj.", width = w4, justify = "centre"), fs(), format("", width = w5), fs(), format("", width = w6), fs(), format("", width = w7), fs(), "\n" ) cat( format("Step", width = w1, justify = "centre"), fs(), format("Entered", width = w2, justify = "centre"), fs(), format("R-Square", width = w3, justify = "centre"), fs(), format("R-Square", width = w4, justify = "centre"), fs(), format("C(p)", width = w5, justify = "centre"), fs(), format("AIC", width = w6, justify = "centre"), fs(), format("RMSE", width = w7, justify = "centre"), fs(), "\n" ) cat(rep("-", w), sep = "", "\n") for (i in seq_len(n)) { cat( format(i, width = w1), fs(), format(data$predictors[i], width = w2), fs(), format(round(data$rsquare[i], 4), width = w3, nsmall = 4), fs(), format(round(data$adjr[i], 4), width = w4, nsmall = 4), fs(), format(round(data$mallows_cp[i], 4), width = w5, justify = "centre", nsmall = 4), fs(), format(round(data$aic[i], 4), width = w6, nsmall = 4), fs(), format(round(data$rmse[i], 4), width = w7, nsmall = 4), fs(), "\n" ) } cat(rep("-", w), sep = "", "\n") }
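# Quick illustration of the fixed-width formatting helpers above (added
# comment, not part of the original file):
# fl("GQ", 7)    returns "GQ     "   (left-justified, width 7)
# fg("1.234", 8) returns "   1.234"  (right-justified, width 8)
# fsp() and fs() both return a single space used as a column separator.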
/scratch/gouwar.j/cran-all/cranData/AFR/R/print.R
#' @title Pluto-Tasche method for multi-year probability of default (PD) analysis
#' @description
#' Calculates probabilities of default according to the multi-period Pluto and Tasche model
#' @param pf unconditional portfolio distribution from the worst to the best credit quality
#' @param num_def number of defaults in a given rating class
#' @param conf_level confidence level of the PD estimates
#' @param num_years number of periods used in the PD estimation
#' @examples
#' pf <- c(10,20,30,40)
#' num_def <- c(1,2,3,4)
#' conf_level = 0.99
#' num_years = 3
#' pt_multi(pf, num_def, conf_level, num_years)
#' @rdname pt_multi
#' @export

pt_multi <- function(pf, num_def, conf_level, num_years) {
  # mean and variance of the portfolio distribution
  mean <- mean(pf)
  var <- var(pf)
  # default-count threshold implied by the confidence level over num_years periods
  threshold <- qnorm(1 - conf_level) * sqrt(var * num_years) + mean * num_years
  # defaults in excess of the threshold, converted to upper-tail probabilities
  excess_defaults <- num_def - threshold
  pd <- pnorm(-excess_defaults / sqrt(var * num_years), lower.tail = FALSE)
  pd <- rev(round(pd, 3))
  cat("Estimated probability of default:\n")
  return(pd)
}
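# Worked illustration of the threshold above (added comment; inputs taken from
# the documented example): with pf = c(10, 20, 30, 40), mean(pf) = 25 and
# var(pf) = 166.7, so for conf_level = 0.99 and num_years = 3 the threshold is
# qnorm(0.01) * sqrt(166.7 * 3) + 25 * 3, i.e. about -2.326 * 22.36 + 75 = 23 defaults.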
/scratch/gouwar.j/cran-all/cranData/AFR/R/pt_multi.R
#' @title Pluto-Tasche method for one-year probability of default (PD) analysis
#' @description
#' Calculates probability of default according to the one-period Pluto and Tasche model
#' @param pf unconditional portfolio distribution from the worst to the best credit quality
#' @param num_def number of defaults in a given rating class
#' @param ci confidence interval of PD estimates
#' @examples
#' pf <- c(10,20,30,40)
#' num_def <- c(1,2,3,4)
#' pt_one(pf, num_def, ci= 0.9)
#' @references Surzhko, Denis. Published 2015-05-21. LDPD package. Archived on 2022-06-20.
#' @rdname pt_one
#' @export

pt_one <- function (pf, num_def, ci = 0.9){
  r.num <- length(pf)
  r.PD <- rep(0, r.num)
  # cumulative counts, starting from the worst rating grade
  portf.CNum <- rev(cumsum(pf))
  portf.CDef <- rev(cumsum(num_def))
  for (r in seq_len(r.num)) {
    if (portf.CDef[r] == portf.CNum[r]) {
      r.PD[r] <- 1
    } else {
      # upper confidence bound: solve P(defaults <= observed | PD = x) = 1 - ci
      f <- function(x) pbinom(portf.CDef[r], portf.CNum[r], x) - 1 + ci
      r.PD[r] <- uniroot(f, c(0, 1))$root
    }
  }
  pd <- rev(round(r.PD, 3))
  cat("Estimated probability of default:", pd, "\n")
  return(pd)
}
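# Worked check of the upper-bound logic above (appended sketch; the inputs are
# assumptions chosen for illustration): with zero observed defaults among n
# obligors, uniroot solves pbinom(0, n, p) = 1 - ci, i.e. (1 - p)^n = 1 - ci.
n <- 100
ci <- 0.9
uniroot(function(p) pbinom(0, n, p) - 1 + ci, c(0, 1))$root  # ~ 0.0228
1 - (1 - ci)^(1 / n)                                         # closed form, same value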
/scratch/gouwar.j/cran-all/cranData/AFR/R/pt_one.R
#' @title Regression forecast plot
#' @description
#' The function depicts forecast and actual data.
#' @usage reg_plot(model, dataset)
#' @param model An object of class \code{lm}.
#' @param dataset A dataset based on which the model was built.
#' @examples
#' data(macroKZ)
#' model <- lm(real_gdp ~ usdkzt + eurkzt + imp + exp, data = macroKZ)
#' reg_plot(model, macroKZ)
#' @author The Agency of the Republic of Kazakhstan for Regulation and Development of Financial Market (AFR)
#' @importFrom forecast auto.arima
#' @importFrom forecast forecast
#' @importFrom graphics legend lines
#' @export
#'
reg_plot <- function(model, dataset) {
  model$call$formula <- as.formula(model)
  s <- splitFormula(model$call$formula, sep = "+")
  s <- gsub("~", replacement = "", x = s, ignore.case = TRUE)
  # name of the response variable
  r <- model$call$formula[2]
  # extract the response series from the dataset
  c <- dataset[, colnames(dataset)[grep(r, colnames(dataset))]]
  # fit an ARIMA model and forecast 12 quarters (3 years) ahead
  ar <- auto.arima(c, stationary = FALSE, seasonal = FALSE)
  f <- forecast(ar, h = 12)
  s <- setNames(f$model$arma, c("p", "q", "P", "Q", "m", "d", "D"))
  cat(paste("Parameters of the best ARIMA model are:"), sep = "\n")
  print(as.table(s))
  plot(f, main = "Forecast by ARIMA for 3 years ahead")
  lines(c, col = "black")
  legend("topleft", legend = c("Arima", "Actual"),
         col = c("light blue", "black"), lty = 1:2, cex = 0.8)
}
/scratch/gouwar.j/cran-all/cranData/AFR/R/reg_plot.R
#' Test for detecting violation of Gauss-Markov assumptions.
#'
#' @param y A numeric vector or an object of class \code{lm}.
#'
#' @return \code{reg_test} prints the statistics and p-values of the
#' following tests:
#'
#' \item{bp}{Breusch-Pagan statistic}
#' \item{bg}{Breusch-Godfrey statistic}
#' \item{dw}{Durbin-Watson statistic}
#' \item{gq}{Goldfeld-Quandt statistic}
#' @examples
#' data(macroKZ)
#' model <- lm(real_gdp~ imp + exp + poil + eurkzt + usdkzt, macroKZ)
#' reg_test(model)
#' @importFrom lmtest bgtest dwtest
#' @importFrom lmtest bptest gqtest
#' @importFrom cli console_width
#' @export
#'
reg_test <- function(y) {
  w1 <- 18
  w2 <- 14
  w3 <- 7
  w4 <- console_width()
  w <- sum(w1, w2, w3, 8)
  # vectors
  tests <- c(
    "Breusch-Pagan", "Breusch-Godfrey", "Durbin-Watson", "Goldfeld-Quandt"
  )
  stats <- c(
    bptest(y)$statistic, bgtest(y)$statistic, dwtest(y)$statistic, gqtest(y)$statistic
  )
  pvals <- c(
    bptest(y)$p.value, bgtest(y)$p.value, dwtest(y)$p.value, gqtest(y)$p.value
  )
  n <- length(stats)
  # print
  cat(format(as.character("Gauss-Markov assumptions tests"), width = w4, justify = "centre"), "\n\n")
  cat(rep("-", w), sep = "", "\n")
  cat(
    format("Test", width = w1, justify = "centre"), fs(),
    format("Statistic", width = w2, justify = "centre"), fs(),
    format("p-value", width = 7, justify = "centre"), "\n"
  )
  cat(rep("-", w), sep = "", "\n")
  for (i in seq_len(n)) {
    cat(
      format(tests[i], width = w1), fs(),
      format(as.character(round(stats[i], 4)), width = w2, justify = "centre"), fs(),
      format(round(pvals[i], 4), nsmall = 4, width = 7, justify = "centre"), "\n"
    )
  }
  cat(rep("-", w), sep = "", "\n")
}
/scratch/gouwar.j/cran-all/cranData/AFR/R/reg_test.R
#' @title Regressors selection
#' @description
#' The function allows the choice of regressors based on multiple criteria such as AIC, RMSE, etc.
#' @param model is a linear regression model
#' @param pval p value; variables with p value less than \code{pval} will
#' enter into the model
#' @param details Logical; if \code{TRUE}, will print the regression result at
#' each step.
#' @param ... other arguments
#' @param progress Logical; if TRUE, will display variable selection progress.
#' @param metric statistical metrics used to estimate the best model; defaults to adjusted R-squared and AIC
#' @examples
#' data(macroKZ)
#' model <- lm(real_gdp ~ imp + exp + poil + eurkzt + tonia_rate, data = macroKZ)
#' regsel_f(model)
#' @references Hebbali, Aravind. Published 2020-02-10. olsrr package
#' @import stats
#' @import olsrr
#' @importFrom car Anova
#' @importFrom utils tail
#' @rdname regsel_f
#' @export

regsel_f <- function(model, pval = 0.3, metric = c("adjr", "aic"), progress = FALSE, details = FALSE, ...) {
  if (details) {
    progress <- TRUE
  }
  l <- eval(model$call$data)
  nam <- colnames(attr(model$terms, "factors"))
  df <- nrow(l) - 2
  tenter <- qt(1 - (pval) / 2, df)
  n <- ncol(l)
  response <- names(model$model)[1]
  all_pred <- nam
  cterms <- all_pred
  mlen_p <- length(all_pred)
  step <- 1
  ppos <- step
  preds <- c()
  rped <- c()
  pvals <- c()
  tvals <- c()
  rsq <- c()
  adjrsq <- c()
  aic <- c()
  bic <- c()
  cp <- c()
  if (progress) {
    cat(format("Forward Selection Method", justify = "left", width = 27), "\n")
    cat(rep("-", 27), sep = "", "\n\n")
    cat(format("Candidate Terms:", justify = "left", width = 16), "\n\n")
    for (i in seq_len(length(nam))) {
      cat(paste0(i, ". ", nam[i]), "\n")
    }
    cat("\n")
    cat("We are selecting variables based on p value...")
    cat("\n")
    cat("\n")
    if (!details) {
      cat("Variables Entered:", "\n\n")
    }
  }
  for (i in seq_len(mlen_p)) {
    predictors <- all_pred[i]
    m <- lm(paste(response, "~", paste(predictors, collapse = " + ")), l)
    m_sum <- Anova(m)
    pvals[i] <- m_sum$`Pr(>F)`[ppos]
  }
  minp <- which(pvals == min(pvals, na.rm = TRUE))
  rped <- c(rped, preds[minp])
  preds <- all_pred[minp]
  lpreds <- length(preds)
  fr <- ols_regress(paste(response, "~", paste(preds, collapse = " + ")), l)
  rsq <- fr$rsq
  adjrsq <- fr$adjr
  cp <- ols_mallows_cp(fr$model, model)
  aic <- ols_aic(fr$model)
  sbc <- ols_sbc(fr$model)
  sbic <- ols_sbic(fr$model, model)
  rmse <- sqrt(fr$ems)
  if (details) {
    cat("\n")
    cat(paste("Forward Selection: Step", step), "\n\n")
  }
  if (progress) {
    if (interactive()) {
      cat("+", tail(preds, n = 1), "\n")
    } else {
      cat(paste("-", tail(preds, n = 1)), "\n")
    }
  }
  if (details) {
    cat("\n")
    m <- ols_regress(paste(response, "~", paste(preds, collapse = " + ")), l)
    print(m)
    cat("\n\n")
  }
  while (step < mlen_p) {
    all_pred <- all_pred[-minp]
    len_p <- length(all_pred)
    ppos <- ppos + length(minp)
    pvals <- c()
    tvals <- c()
    for (i in seq_len(len_p)) {
      predictors <- c(preds, all_pred[i])
      m <- lm(paste(response, "~", paste(predictors, collapse = " + ")), l)
      m_sum <- Anova(m)
      pvals[i] <- m_sum$`Pr(>F)`[ppos]
    }
    minp <- which(pvals == min(pvals, na.rm = TRUE))
    if (pvals[minp] <= pval) {
      step <- step + 1
      preds <- c(preds, all_pred[minp])
      lpreds <- length(preds)
      fr <- ols_regress(paste(response, "~", paste(preds, collapse = " + ")), l)
      rsq <- c(rsq, fr$rsq)
      adjrsq <- c(adjrsq, fr$adjr)
      aic <- c(aic, ols_aic(fr$model))
      sbc <- c(sbc, ols_sbc(fr$model))
      sbic <- c(sbic, ols_sbic(fr$model, model))
      cp <- c(cp, ols_mallows_cp(fr$model, model))
      rmse <- c(rmse, sqrt(fr$ems))
      if (details) {
        cat("\n")
        cat(paste("Forward Selection: Step", step), "\n\n")
      }
      if (progress) {
        if (interactive()) {
          cat("+", tail(preds, n = 1), "\n")
        } else {
          cat(paste("-", tail(preds, n = 1)), "\n")
        }
      }
      if (details) {
        cat("\n")
        m <- ols_regress(paste(response, "~", paste(preds, collapse = " + ")), l)
        print(m)
        cat("\n\n")
      }
    } else {
      if (progress) {
        cat("\n")
        cat("No more variables to be added.")
      }
      break
    }
  }
  prsq <- c(rsq[1], diff(rsq))
  if (details) {
    cat("\n\n")
    cat("Variables Entered:", "\n\n")
    for (i in seq_len(length(preds))) {
      if (details) {
        cat("+", preds[i], "\n")
      } else {
        cat(paste("+", preds[i]), "\n")
      }
    }
  }
  if (progress) {
    cat("\n\n")
    cat("Final Model Output", "\n")
    cat(rep("-", 18), sep = "", "\n\n")
    fi <- ols_regress(
      paste(response, "~", paste(preds, collapse = " + ")),
      data = l
    )
    print(fi)
  }
  final_model <- lm(paste(response, "~", paste(preds, collapse = " + ")), data = l)
  out <- list(predictors = preds, removed = rped, mallows_cp = cp, indvar = cterms,
              rsquare = rsq, steps = step, sbic = sbic, adjr = adjrsq,
              rmse = rmse, aic = aic, sbc = sbc, model = final_model)
  # keep olsrr's class for compatibility and prepend "regsel_f" so that the
  # print and plot methods defined below actually dispatch
  class(out) <- c("regsel_f", "ols_step_forward_p")
  return(out)
}

#' @export
#' @noRd
#'
print.regsel_f <- function(x, ...) {
  if (x$steps > 0) {
    print_step_forward(x)
  } else {
    print("No variables have been added to the model.")
  }
}

#' @importFrom gridExtra marrangeGrob
#' @export
#' @noRd
#'
plot.regsel_f <- function(x, model = NA, print_plot = TRUE, ...) {
  a <- NULL
  b <- NULL
  y <- seq_len(length(x$rsquare))
  d1 <- data.frame(a = y, b = x$rsquare)
  d2 <- data.frame(a = y, b = x$adjr)
  d3 <- data.frame(a = y, b = x$mallows_cp)
  d4 <- data.frame(a = y, b = x$aic)
  d5 <- data.frame(a = y, b = x$sbic)
  d6 <- data.frame(a = y, b = x$sbc)
  p1 <- plot_stepwise(d1, "R-Square")
  p2 <- plot_stepwise(d2, "Adj. R-Square")
  p3 <- plot_stepwise(d3, "C(p)")
  p4 <- plot_stepwise(d4, "AIC")
  p5 <- plot_stepwise(d5, "SBIC")
  p6 <- plot_stepwise(d6, "SBC")
  myplots <- list(plot_1 = p1, plot_2 = p2, plot_3 = p3,
                  plot_4 = p4, plot_5 = p5, plot_6 = p6)
  if (print_plot) {
    marrangeGrob(myplots, nrow = 2, ncol = 2)
  } else {
    return(myplots)
  }
}
/scratch/gouwar.j/cran-all/cranData/AFR/R/regsel_f.R
#' @title VIF by variable
#' @description
#' Calculates the variance inflation factors of all predictors in regression models
#' @param model is a linear regression model
#' @examples
#' data(macroKZ)
#' model <- lm(real_gdp ~ imp + exp + poil + eurkzt + tonia_rate, data = macroKZ)
#' vif_reg(model)
#' @importFrom cli console_width
#' @references Petrie, Adam. Published 2020-02-21. regclass package
#' @rdname vif_reg
#' @export

vif_reg <- function (model) {
  if (any(is.na(coef(model))))
    stop("there are aliased coefficients in the model")
  v <- vcov(model)
  assign <- attr(model.matrix(model), "assign")
  if (names(coefficients(model)[1]) == "(Intercept)") {
    v <- v[-1, -1]
    assign <- assign[-1]
  }
  else warning("No intercept: vifs may not be sensible.")
  terms <- labels(terms(model))
  n.terms <- length(terms)
  if (n.terms < 2)
    stop("model contains fewer than 2 terms")
  R <- cov2cor(v)
  detR <- det(R)
  result <- matrix(0, n.terms, 3)
  rownames(result) <- terms
  colnames(result) <- c("GVIF", "Df", "GVIF^(1/(2*Df))")
  for (term in 1:n.terms) {
    subs <- which(assign == term)
    # generalized VIF: ratio of correlation-matrix determinants
    result[term, 1] <- det(as.matrix(R[subs, subs])) * det(as.matrix(R[-subs, -subs]))/detR
    result[term, 2] <- length(subs)
  }
  if (all(result[, 2] == 1))
    result <- result[, 1]
  else result[, 3] <- result[, 1]^(1/(2 * result[, 2]))
  # print
  w3 <- console_width()
  cat(format(as.character("Variance Inflation Factor"), width = w3, justify = "centre"), "\n\n")
  cat(paste("If a statistic exceeds 5, please be aware of multicollinearity."), sep = '\n')
  cat("\n")
  print(round(result, 3))
  cat("\n")
  l <- matrix(result, dimnames = list(terms))
  for (i in 1:length(l)) {
    if (l[i] > 5)
      cat(paste("This value", round(l[i], 3), "exceeds the acceptable threshold."), sep = '\n')
  }
}
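# Cross-check sketch (appended illustration, not part of the original file):
# for a single-df term the GVIF reduces to the classical VIF_j = 1 / (1 - R_j^2),
# where R_j^2 comes from regressing predictor j on the remaining predictors.
model <- lm(real_gdp ~ imp + exp + poil + eurkzt + tonia_rate, data = macroKZ)
r2_imp <- summary(lm(imp ~ exp + poil + eurkzt + tonia_rate, data = macroKZ))$r.squared
1 / (1 - r2_imp)  # should be close to the "imp" entry printed by vif_reg(model)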
/scratch/gouwar.j/cran-all/cranData/AFR/R/vif_reg.R
.onAttach <- function(libname, pkgname) { mylib <- dirname(system.file(package = "AFR")) ver <- utils::packageDescription("AFR")["Version"] txt <- c("\n", paste(sQuote("AFR"), "version:", ver), "\n", paste(sQuote("AFR"), "is a package for banking sector analysis", "and easier interpretation of statistical functions."), "\n", paste("See", sQuote("library(help=\"AFR\")"), "for details.")) if(interactive() || getOption("verbose")) packageStartupMessage(paste(strwrap(txt, indent = 4, exdent = 4), collapse = "\n")) }
/scratch/gouwar.j/cran-all/cranData/AFR/R/zzz.R
## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup, echo=FALSE, message=FALSE----------------------------------------- library(AFR) library(stats) library(tseries) ## ----echo=TRUE---------------------------------------------------------------- data(macroKZ) checkdata(macroKZ) ## ----echo=TRUE---------------------------------------------------------------- macroKZ<-na.remove(macroKZ) ## ----results="hide"----------------------------------------------------------- new<-log(macroKZ) ## ----results="hide"----------------------------------------------------------- corsel(macroKZ,num=FALSE,thrs=0.65) ## ----echo=TRUE---------------------------------------------------------------- model<-lm(real_gdp~imp+exp+usdkzt+eurkzt, macroKZ)
/scratch/gouwar.j/cran-all/cranData/AFR/inst/doc/Data-transformation.R
---
title: "Data-transformation"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Data-transformation}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

```{r setup, echo=FALSE, message=FALSE}
library(AFR)
library(stats)
library(tseries)
```

## Introduction

For unbiased statistical analysis it is often necessary to transform the data so that it fits the model assumptions. The *AFR* package ships the default time-series dataset *macroKZ* of macroeconomic parameters for the 2010-2023 period. The dataset is raw, unordered and contains missing values.

**AFR** recommends:

Step 1. Check the data for format, missing values, outliers and *summary* statistics (min, max, etc.).

Step 2. Check the data for stationarity.

Step 3. In case of non-stationarity, transform the data to stationarity by a transformation method.

Step 4. Once the data is transformed, choose regressors for a model.

### Step 1

Once the default dataset *macroKZ* is loaded, check it with the *checkdata* and *summary* functions.

```{r, echo=TRUE}
data(macroKZ)
checkdata(macroKZ)
```

Depending on the outputs, apply the necessary functions to eliminate inappropriate properties of the data. For instance, in case of missing values, delete these missing values.

```{r, echo=TRUE}
macroKZ<-na.remove(macroKZ)
```

### Step 2

Once the dataset is preliminarily cleaned, the time-series data needs to be stationary. Stationarity is needed for the properties to be independent of time periods, i.e. mean, variance, etc. are constant over time. In R stationarity can be checked by the Augmented Dickey-Fuller (*adf.test*) and/or Kwiatkowski-Phillips-Schmidt-Shin (*kpss.test*) tests. In more detail, *macroKZ* can be combined with the *sapply* function to view which parameters are stationary or not (see the sketch at the end of this vignette).

### Step 3

If the dataset as a whole, or individual parameters, are non-stationary, it is recommended to apply transformation techniques to make the data stationary. The most common transformation tools are differencing (first and second order), taking logarithms, differences of logarithms, detrending, etc. After the transformation method(s) is applied, make sure that the data is stationary.

```{r, results="hide"}
new<-log(macroKZ)
```

### Step 4

To build the best regression model, the regressors/independent variables need to be independent of each other. If this condition is violated, multicollinearity is present and the regression estimators are biased. The *AFR* package offers the *corsel* function that estimates correlation between regressors in the dataset given a threshold (set by the user). The result can be presented numerically or logically (TRUE/FALSE).

```{r, results="hide"}
corsel(macroKZ,num=FALSE,thrs=0.65)
```

Once regressors are chosen, a linear regression model can be built via the *lm* function.

```{r, echo=TRUE}
model<-lm(real_gdp~imp+exp+usdkzt+eurkzt, macroKZ)
```
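Returning to Step 2, a small appended sketch (an illustration, not part of the original vignette) of combining *sapply* with *adf.test*: p-values above 0.05 indicate that the corresponding series is non-stationary.

```{r, eval=FALSE}
sapply(colnames(macroKZ), function(name) adf.test(macroKZ[, name])$p.value)
```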
/scratch/gouwar.j/cran-all/cranData/AFR/inst/doc/Data-transformation.Rmd
## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup, echo=FALSE, message=FALSE----------------------------------------- library(AFR) library(lmtest) library(stats) library(olsrr) ## ----echo=TRUE---------------------------------------------------------------- model <- lm(real_gdp ~ imp + exp + poil + eurkzt,macroKZ) bp(model) ## ----echo=TRUE---------------------------------------------------------------- model <- lm(real_gdp ~ imp + exp+poil+eurkzt, macroKZ) gq(model) ## ----echo=TRUE---------------------------------------------------------------- model <- lm(real_gdp ~ imp + exp + poil + eurkzt,macroKZ) vif_reg(model) ## ----echo=TRUE---------------------------------------------------------------- model <- lm(real_gdp ~ imp + exp + poil + eurkzt,macroKZ) dwtest(model) ## ----echo=TRUE---------------------------------------------------------------- model <- lm(real_gdp ~ imp + exp + poil + eurkzt,macroKZ) bg(model) ## ----echo=TRUE---------------------------------------------------------------- #model <- lm(real_gdp ~ imp + exp + poil + eurkzt,macroKZ) #ols_test_normality(model)
/scratch/gouwar.j/cran-all/cranData/AFR/inst/doc/Diagnostic-tests.R
---
title: "Diagnostic-tests"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Diagnostic-tests}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

```{r setup, echo=FALSE, message=FALSE}
library(AFR)
library(lmtest)
library(stats)
library(olsrr)
```

## Introduction

For the analysis of multiple linear regression models, statisticians apply the Gauss-Markov theorem so that the estimators of the regression are best linear unbiased estimators (BLUE). The theorem includes 5 assumptions about heteroskedasticity, linearity, exogeneity, random sampling and non-collinearity.

**AFR** provides:

2 tests for detecting heteroskedasticity:

- Breusch-Pagan Test
- Goldfeld-Quandt Test

3 tests for detecting multicollinearity and autocorrelation:

- VIF test
- Durbin-Watson Test
- Breusch-Godfrey Test

4 tests for detecting normality:

- Shapiro-Wilk test
- Kolmogorov-Smirnov test
- Cramer-von Mises test
- Anderson-Darling test

### Heteroskedasticity

One of the assumptions made about the residuals/errors in OLS regression is that the errors have the same but unknown variance. This is known as constant variance or homoskedasticity. When this assumption is violated, the problem is known as heteroskedasticity, and one of the 5 Gauss-Markov assumptions fails. It is tested by the Breusch-Pagan and Goldfeld-Quandt tests.

#### Breusch-Pagan Test

The Breusch-Pagan test was introduced by Trevor Breusch and Adrian Pagan in 1979. It is used to test for heteroskedasticity in a linear regression model and assumes that the error terms are normally distributed. It tests whether the variance of the errors from a regression is dependent on the values of the independent variables. The null hypothesis states that the error variances are constant.

```{r, echo=TRUE}
model <- lm(real_gdp ~ imp + exp + poil + eurkzt,macroKZ)
bp(model)
```

#### Goldfeld-Quandt Test

The Goldfeld-Quandt test is used in regression analysis to test for homoskedasticity. It compares the variances of two subgroups: one set of high values and one set of low values. If the variances differ, the test rejects the null hypothesis that the variances of the errors are constant.

```{r, echo=TRUE}
model <- lm(real_gdp ~ imp + exp+poil+eurkzt, macroKZ)
gq(model)
```

### Non-collinearity

Multiple regression assumes that the independent variables are not highly correlated with each other. This assumption is tested using Variance Inflation Factor (VIF) values and by the Durbin-Watson and Breusch-Godfrey tests for autocorrelation.

#### VIF Test

The VIF of a predictor in a linear regression is defined as VIF = 1/T, where the tolerance T is 1 minus the R-squared obtained by regressing that predictor on the remaining predictors. With VIF > 5 there is an indication that multicollinearity may be present; with VIF > 10 there is certainly multicollinearity among the variables.

```{r, echo=TRUE}
model <- lm(real_gdp ~ imp + exp + poil + eurkzt,macroKZ)
vif_reg(model)
```

#### Durbin-Watson Test

The Durbin-Watson (DW) statistic is a test for autocorrelation in the residuals from a statistical model or regression analysis. The Durbin-Watson statistic will always have a value ranging between 0 and 4. A value of 2.0 indicates that there is no autocorrelation detected in the sample. Values from 0 to less than 2 point to positive autocorrelation and values from 2 to 4 mean negative autocorrelation.

```{r, echo=TRUE}
model <- lm(real_gdp ~ imp + exp + poil + eurkzt,macroKZ)
dwtest(model)
```

#### Breusch-Godfrey Test

Alternatively, there is the Breusch-Godfrey test for an autocorrelation check. It tests for the presence of serial correlation that has not been included in a proposed model structure and which, if present, would mean that incorrect conclusions would be drawn from other tests or that sub-optimal estimates of model parameters would be obtained. The null hypothesis states that there is no autocorrelation.

```{r, echo=TRUE}
model <- lm(real_gdp ~ imp + exp + poil + eurkzt,macroKZ)
bg(model)
```

### Normality

Normality refers to a specific statistical distribution called a normal distribution, or sometimes the Gaussian distribution or bell-shaped curve. The normal distribution is a symmetrical continuous distribution defined by the mean and standard deviation of the data. In the AFR package 4 normality tests are compiled in the single *ols_test_normality* function, adapted from the *olsrr* package (a direct-call sketch is appended at the end of this vignette).

```{r, echo=TRUE}
#model <- lm(real_gdp ~ imp + exp + poil + eurkzt,macroKZ)
#ols_test_normality(model)
```

#### Shapiro-Wilk statistic

The null hypothesis of this test is that the population is normally distributed. Thus, if the p-value is less than the chosen alpha level, then the null hypothesis is rejected and there is evidence that the data tested are not normally distributed. On the other hand, if the p-value is greater than the chosen alpha level, then the null hypothesis (that the data came from a normally distributed population) can not be rejected.

#### Kolmogorov-Smirnov statistic

The Kolmogorov-Smirnov statistic quantifies a distance between the empirical distribution function of the sample and the cumulative distribution function of the reference distribution, or between the empirical distribution functions of two samples. The null distribution of this statistic is calculated under the null hypothesis that the sample is drawn from the reference distribution (in the one-sample case) or that the samples are drawn from the same distribution (in the two-sample case). If the p-value is less than 0.05, the null hypothesis is rejected.

#### Cramer-von Mises test

As an alternative to the Kolmogorov-Smirnov test, the Cramer-von Mises statistic is a measure of the mean squared difference between the empirical and hypothetical cumulative distribution functions. It is also used as a part of other algorithms, such as minimum distance estimation. The Cramer-von Mises test can be seen to be distribution-free if the empirical distribution is continuous and the sample has no ties. Otherwise, the tabulated distribution of the statistic is not the true asymptotic distribution.

#### Anderson-Darling test

The Anderson-Darling test is used to test if a sample of data comes from a population with a specific distribution. The null hypothesis is that your data is not different from normal. Your alternative hypothesis is that your data is different from normal. You will make your decision about whether to reject or not reject the null based on your p-value.

For additional information please address:

*Wooldridge, Jeffrey M. 2012. Introductory Econometrics: A Modern Approach, Fifth Edition.*

*Hyndman, Rob J and George Athanasopoulos. 2018. Forecasting: Principles and Practice, 2nd Edition.*
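As an appended sketch (an illustration, not part of the original vignette): since the combined *ols_test_normality* chunk in the Normality section above is commented out, one of the underlying tests can be run directly on the model residuals.

```{r, eval=FALSE}
model <- lm(real_gdp ~ imp + exp + poil + eurkzt, macroKZ)
shapiro.test(residuals(model))
```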
/scratch/gouwar.j/cran-all/cranData/AFR/inst/doc/Diagnostic-tests.Rmd
## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup, echo=FALSE, message=FALSE----------------------------------------- library(AFR) library(olsrr) library(stats) ## ----echo=TRUE---------------------------------------------------------------- model<-lm(real_gdp~imp+exp+usdkzt+eurkzt, macroKZ) opt_size(model) ## ----results="hide"----------------------------------------------------------- check_betas(model) ## ----results="hide"----------------------------------------------------------- dec_plot(model, macroKZ) ## ----results="hide"----------------------------------------------------------- reg_plot(model, macroKZ)
/scratch/gouwar.j/cran-all/cranData/AFR/inst/doc/Regression-model.R
---
title: "Regression-model"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Regression-model}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

```{r setup, echo=FALSE, message=FALSE}
library(AFR)
library(olsrr)
library(stats)
```

## Introduction

Once regressors are chosen for a linear regression model, the **AFR** package recommends checking:

### 1. Optimal size of the time-series data

The function *opt_size* assesses whether the time-series data has enough observations for the chosen model.

```{r, echo=TRUE}
model<-lm(real_gdp~imp+exp+usdkzt+eurkzt, macroKZ)
opt_size(model)
```

Based on the output of the function, modify the model, i.e. remove or add regressor(s).

### 2. Choose the best regression model

Starting from the initially built linear regression model, the *regsel_f* function allows choosing the best regressors by the Akaike Information Criterion (*AIC*) and Adjusted R-squared (*Adj R2*) parameters. These parameters are set by default, but other parameters can be added too.

To dive into details, the *check_betas* function demonstrates all models with the regressors' betas, based on which the *regsel_f* function gives its result. A user can export the output of all models into an Excel document, for a more readable format, by using the *write_xlsx* function of the *writexl* package.

```{r, results="hide"}
check_betas(model)
```

### 3. Analysis of the model

Once *regsel_f* has given the best regression model, it can be analysed with diagnostic tests for compliance with the Gauss-Markov theorem for a multiple regression model (see the sketch at the end of this vignette). Graphically, the regression model can be visualized for decomposition and forecasting.

The function *dec_plot* demonstrates the contribution of each regressor in the form of a stacked bar plot.

```{r, results="hide"}
dec_plot(model, macroKZ)
```

The function *reg_plot* shows actual and forecast data. Forecasting can be performed by ARIMA or trending.

```{r, results="hide"}
reg_plot(model, macroKZ)
```
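As a final appended sketch (an illustration, not part of the original vignette), the Gauss-Markov diagnostics mentioned above can be run together with the *reg_test* function:

```{r, eval=FALSE}
reg_test(model)
```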
/scratch/gouwar.j/cran-all/cranData/AFR/inst/doc/Regression-model.Rmd
############## AF as a function of heritability #####################
#' @title Plot the attributable fraction as a function of heritability, disease prevalence, size of target group and intervention effect.
#' @description \code{AFfunction} is a function which illustrates the AF as a function of heritability, disease prevalence, size of target group and intervention effect.
#' @param Prevalence an estimate of the disease prevalence
#' @param Heritability an estimate of the disease heritability
#' @param Target proportion of those at highest genetic risk being targeted by the intervention
#' @param Intervention effect of the intervention
#' @param xaxis option to specify which of the arguments \code{Prevalence, Heritability, Target} or \code{Intervention} should be used as the x-axis of the plot. The argument \code{xaxis} is a string with values \code{"Prevalence", "Heritability", "Target"} or \code{"Intervention"}.
#' @param compare option to specify which of the arguments \code{Prevalence, Heritability, Target} or \code{Intervention} should be used for comparisons. The argument \code{compare} is a string with values \code{"Prevalence", "Heritability", "Target"} or \code{"Intervention"}; the argument chosen for comparison can then be supplied as a numeric vector with a range of values or as a single value, see examples.
#' @param Intervention_type an option to specify how the intervention is expected to affect the genetic liability distribution. The default option \code{"location"} assumes that the intervention shifts the genetic liability distribution to lower levels, among those targeted by the intervention. The option \code{"scale"} assumes that the intervention reduces the variance of the genetic liability distribution, among those targeted by the intervention.
#' @param plot option to return a plot. Default is set to \code{TRUE}.
#' @param legend option to return a legend in the plot. Default is set to \code{TRUE}.
#' @param cex specifies the text size in the plot. Default is set to size \code{1.4}.
#' @param ... further arguments to be passed to the ggplot function. See \code{\link[ggplot2]{ggplot}}.
#' @return \item{AF}{the AF as a function of heritability, disease prevalence, size of target group and intervention effect.}
#' @return \item{plot}{Plot of the AF as a function of either heritability, disease prevalence, size of target group or intervention effect. The legend shows the comparison variable.}
#' @details The AFfunction() is a function that produces a plot of the AF as a function of \code{Prevalence, Heritability, Target} or \code{Intervention}. A user interface of the function is provided in \code{\link[AFheritability]{runShinyApp}}.
#' @references Dahlqwist E et al. (2019) <doi:10.1007/s00439-019-02006-8>.
#' @examples
#' # Example
#' heritability <- seq(0, 1, by = 0.1)
#' target_sizes <- sort(c(0.30, 0.25, 0.20, 0.15, 0.05, 0.01))
#'
#' AF_h <- AFfunction(Prevalence = 0.5, Heritability = heritability,
#'                    Target = target_sizes, Intervention = 1,
#'                    compare = "Target", xaxis = "Heritability",
#'                    ylim = c(0, 0.3), cex = 1.6)
#'
#' AF_h
#' @importFrom ggplot2 ggplot aes geom_line geom_point theme_bw theme element_blank element_line element_text labs
#' @importFrom reshape2 melt
#' @importFrom stats qnorm
#' @import stats mvtnorm ggplot2 reshape2
#' @export
AFfunction <- function(Prevalence, Heritability, Target, Intervention, xaxis, compare,
                       Intervention_type = "location", plot = TRUE, legend = TRUE, cex = 1.4, ...){
  # fall back to a plot-free run if ggplot2 is unavailable
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    plot <- FALSE
  }
  if(compare == 'Prevalence' & xaxis == 'Heritability') func <- AF_heritability_prevalence
  if(compare == 'Target' & xaxis == 'Heritability') func <- AF_heritability_target
  if(compare == 'Intervention' & xaxis == 'Heritability') func <- AF_heritability_intervention
  if(compare == 'Heritability' & xaxis == 'Prevalence') func <- AF_prevalence_heritability
  if(compare == 'Target' & xaxis == 'Prevalence') func <- AF_prevalence_target
  if(compare == 'Intervention' & xaxis == 'Prevalence') func <- AF_prevalence_intervention
  if(compare == 'Heritability' & xaxis == 'Target') func <- AF_target_heritability
  if(compare == 'Prevalence' & xaxis == 'Target') func <- AF_target_prevalence
  if(compare == 'Intervention' & xaxis == 'Target') func <- AF_target_intervention
  if(compare == 'Heritability' & xaxis == 'Intervention') func <- AF_intervention_heritability
  if(compare == 'Prevalence' & xaxis == 'Intervention') func <- AF_intervention_prevalence
  if(compare == 'Target' & xaxis == 'Intervention') func <- AF_intervention_target
  # pass the user's plot and legend settings through to the selected function
  AF_est <- func(Prevalence, Heritability, Target, Intervention, Intervention_type,
                 plot = plot, legend = legend, ...)
  return(AF_est)
}
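# Additional illustrative call (an appended sketch; the parameter values are
# assumptions chosen for illustration): a variance-reducing ("scale")
# intervention among the 5% at highest genetic risk.
AF_scale <- AFfunction(Prevalence = 0.3, Heritability = seq(0, 1, by = 0.1),
                       Target = 0.05, Intervention = 0.8,
                       Intervention_type = "scale",
                       compare = "Target", xaxis = "Heritability")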
/scratch/gouwar.j/cran-all/cranData/AFheritability/R/AFfunction.R
######### Background functions for plots

Counterfactual_prev_target_location <- function(p, b, k, h2){
  corrmatrix <- diag(2)
  corrmatrix[2, 1] <- sqrt(h2)
  corrmatrix[1, 2] <- sqrt(h2)
  beta <- qnorm(p)
  p_target <- as.numeric(pmvnorm(lower = c(-beta, b), upper = c(Inf, Inf),
                                 mean = rep(0, 2), corr = corrmatrix))
  lower1 <- -beta + k * sqrt(h2)
  lower2 <- b
  p_target_delta <- as.numeric(pmvnorm(lower = c(lower1, lower2), upper = c(Inf, Inf),
                                       mean = rep(0, 2), corr = corrmatrix))
  AF <- as.numeric((p_target - p_target_delta) / p)
  return(AF)
}

Counterfactual_prev_target_scale <- function(p, b, delta, h2){
  corrmatrix <- diag(2)
  corrmatrix[2, 1] <- sqrt(h2)
  corrmatrix[1, 2] <- sqrt(h2)
  beta <- qnorm(p, lower.tail = FALSE)
  p_target <- as.numeric(pmvnorm(lower = c(beta, b), upper = c(Inf, Inf),
                                 mean = rep(0, 2), corr = corrmatrix))
  lower1 <- beta / sqrt(h2 / delta^2 + (1 - h2))
  lower2 <- b
  p_target_delta <- as.numeric(pmvnorm(lower = c(lower1, lower2), upper = c(Inf, Inf),
                                       mean = rep(0, 2), corr = corrmatrix))
  AF <- as.numeric((p_target - p_target_delta) / p)
  return(AF)
}

######## To calculate counterfactual disease prevalence among the target group
Counterfactual_prev <- function(p, b, k, h2){
  corrmatrix <- diag(2)
  corrmatrix[2, 1] <- sqrt(h2)
  corrmatrix[1, 2] <- sqrt(h2)
  beta <- qnorm(p)
  p_target <- as.numeric(pmvnorm(lower = c(-beta, b), upper = c(Inf, Inf),
                                 mean = rep(0, 2), corr = corrmatrix))
  lower1 <- -beta + k * sqrt(h2)
  lower2 <- b
  p_target_delta <- as.numeric(pmvnorm(lower = c(lower1, lower2), upper = c(Inf, Inf),
                                       mean = rep(0, 2), corr = corrmatrix))
  p_new <- (p - p_target) + p_target_delta
  p_p <- p_target_delta / p_target
  out <- list(p_target = p_target, p_target_delta = p_target_delta,
              p_new = p_new, p_p = p_p)
  return(out)
}

############ Translate quantiles into proportions
probabilities <- function(p) {
  percent <- function(x){
    probability <- x * 100
    if (probability >= 1) probability <- round(probability, digits = 1)
    if (probability < 1 & probability > 0.1) probability <- round(probability, digits = 2)
    if (probability <= 0.1) probability <- round(probability, digits = 3)
    probability <- paste(probability, "%")
    return(probability)
  }
  prob <- lapply(p, percent)
  prob <- unlist(prob)
  return(prob)
}

######### xaxis = heritability
############ compare = prevalence
AF_heritability_prevalence <- function(Prevalence, Heritability, Target, Intervention,
                                       Intervention_type, plot = TRUE, legend = TRUE,
                                       legend_position, ...){
  AF <- matrix("list", nrow = length(Prevalence), ncol = length(Heritability))
  # Transform the proportion targeted into a quantile
  Target <- qnorm(Target, lower.tail = FALSE)
  for (j in 1:length(Heritability)){
    heritability <- Heritability[j]
    for (i in 1:length(Prevalence)){
      prevalence <- Prevalence[i]
      if (Intervention_type == "location")
        AF[i, j] <- Counterfactual_prev_target_location(p = prevalence, k = Intervention, b = Target, h2 = heritability)
      if (Intervention_type == "scale")
        AF[i, j] <- Counterfactual_prev_target_scale(p = prevalence, delta = Intervention, b = Target, h2 = heritability)
    }
  }
  AF <- t(AF)
  if (plot == TRUE){
    AF <- as.data.frame(AF)
    Prevalence_text <- probabilities(Prevalence)
    colnames(AF) <- Prevalence_text
    AF$Heritability <- Heritability
    plot_data <- melt(AF, id.var = "Heritability")
    plot_data$value <- as.numeric(plot_data$value)
    colnames(plot_data) <- c("Heritability", "Prevalence", "AF")
    ## Plot
    plot_heritability <- ggplot(plot_data, aes(x = Heritability, y = AF, group = Prevalence, colour = Prevalence)) +
      geom_line(aes(linetype = Prevalence), size = 1) + geom_point(aes(shape = Prevalence)) + theme_bw() +
      theme(panel.border = element_blank(), panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),
            text = element_text(size = 17)) +
      labs(x = expression(paste("Heritability ", "(", h^2, ")")))
    print(plot_heritability)
    # Return the plot object as well (the original code overwrote 'out' below,
    # so the plot was never returned); the same fix is applied in the siblings.
    out <- list(AF = AF, plot_heritability = plot_heritability)
  } else {
    out <- list(AF = AF)
  }
  return(out)
}

############ compare = target
AF_heritability_target <- function(Prevalence, Heritability, Target, Intervention,
                                   Intervention_type, plot = TRUE, legend = TRUE,
                                   legend_position, ylim, ...){
  AF <- matrix("list", nrow = length(Target), ncol = length(Heritability))
  Target_percent <- Target
  # Transform the proportion targeted into a quantile
  Target <- qnorm(Target, lower.tail = FALSE)
  for (j in 1:length(Heritability)){
    heritability <- Heritability[j]
    for (i in 1:length(Target)){
      target <- Target[i]
      if (Intervention_type == "location")
        AF[i, j] <- Counterfactual_prev_target_location(p = Prevalence, k = Intervention, b = target, h2 = heritability)
      if (Intervention_type == "scale")
        AF[i, j] <- Counterfactual_prev_target_scale(p = Prevalence, delta = Intervention, b = target, h2 = heritability)
    }
  }
  AF <- t(AF)
  if (plot == TRUE){
    AF <- as.data.frame(AF)
    Target_percent_text <- probabilities(Target_percent)
    colnames(AF) <- Target_percent_text
    AF$Heritability <- Heritability
    plot_data <- melt(AF, id.var = "Heritability")
    plot_data$value <- as.numeric(plot_data$value)
    colnames(plot_data) <- c("Heritability", "Target_percent", "AF")
    ## Plot
    plot_heritability <- ggplot(plot_data, aes(x = Heritability, y = AF, group = Target_percent, colour = Target_percent)) +
      geom_line(aes(linetype = Target_percent), size = 1) + geom_point(aes(shape = Target_percent), size = 2) + theme_bw() +
      theme(panel.border = element_blank(), panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),
            text = element_text(size = 17)) +
      labs(x = expression(paste("Heritability ", "(", h^2, ")")),
           colour = "Targeted", lty = "Targeted", shape = "Targeted")
    print(plot_heritability)
    out <- list(AF = AF, plot_heritability = plot_heritability)
  } else {
    out <- list(AF = AF)
  }
  return(out)
}

############ compare = intervention
AF_heritability_intervention <- function(Prevalence, Heritability, Target, Intervention,
                                         Intervention_type, plot = TRUE, legend = TRUE,
                                         legend_position, ylim, ...){
  AF <- matrix("list", nrow = length(Intervention), ncol = length(Heritability))
  # Transform the proportion targeted into a quantile
  Target <- qnorm(Target, lower.tail = FALSE)
  for (j in 1:length(Heritability)){
    heritability <- Heritability[j]
    for (i in 1:length(Intervention)){
      intervention <- Intervention[i]
      if (Intervention_type == "location")
        AF[i, j] <- Counterfactual_prev_target_location(p = Prevalence, k = intervention, b = Target, h2 = heritability)
      if (Intervention_type == "scale")
        AF[i, j] <- Counterfactual_prev_target_scale(p = Prevalence, delta = intervention, b = Target, h2 = heritability)
    }
  }
  AF <- t(AF)
  if (plot == TRUE){
    AF <- as.data.frame(AF)
    colnames(AF) <- Intervention
    AF$Heritability <- Heritability
    plot_data <- melt(AF, id.var = "Heritability")
    plot_data$value <- as.numeric(plot_data$value)
    colnames(plot_data) <- c("Heritability", "Intervention", "AF")
    ## Plot
    plot_heritability <- ggplot(plot_data, aes(x = Heritability, y = AF, group = Intervention, colour = Intervention)) +
      geom_line(aes(lty = Intervention), size = 1) + geom_point(aes(shape = Intervention), size = 2) + theme_bw() +
      theme(panel.border = element_blank(), panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),
            text = element_text(size = 17)) +
      labs(x = expression(paste("Heritability ", "(", h^2, ")")))
    print(plot_heritability)
    out <- list(AF = AF, plot_heritability = plot_heritability)
  } else {
    out <- list(AF = AF)
  }
  return(out)
}

######## xaxis = prevalence
############ compare = heritability
AF_prevalence_heritability <- function(Prevalence, Heritability, Target, Intervention,
                                       Intervention_type, plot = TRUE, legend = TRUE,
                                       legend_position, ylim, ...){
  AF <- matrix("list", nrow = length(Heritability), ncol = length(Prevalence))
  # Transform the proportion targeted into a quantile
  Target <- qnorm(Target, lower.tail = FALSE)
  for (j in 1:length(Prevalence)){
    prevalence <- Prevalence[j]
    for (i in 1:length(Heritability)){
      heritability <- Heritability[i]
      if (Intervention_type == "location")
        AF[i, j] <- Counterfactual_prev_target_location(p = prevalence, k = Intervention, b = Target, h2 = heritability)
      if (Intervention_type == "scale")
        AF[i, j] <- Counterfactual_prev_target_scale(p = prevalence, delta = Intervention, b = Target, h2 = heritability)
    }
  }
  AF <- t(AF)
  if (plot == TRUE){
    AF <- as.data.frame(AF)
    Heritability_text <- probabilities(Heritability)
    colnames(AF) <- Heritability_text
    AF$Prevalence <- Prevalence
    plot_data <- melt(AF, id.var = "Prevalence")
    plot_data$value <- as.numeric(plot_data$value)
    colnames(plot_data) <- c("Prevalence", "Heritability", "AF")
    ## Plot
    plot_prevalence <- ggplot(plot_data, aes(x = Prevalence, y = AF, group = Heritability, colour = Heritability)) +
      geom_line(aes(lty = Heritability), size = 1) + geom_point(aes(shape = Heritability), size = 2) + theme_bw() +
      theme(panel.border = element_blank(), panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),
            text = element_text(size = 17)) +
      labs(x = "Prevalence")
    print(plot_prevalence)
    out <- list(AF = AF, plot_prevalence = plot_prevalence)
  } else {
    out <- list(AF = AF)
  }
  return(out)
}

############ compare = target
AF_prevalence_target <- function(Prevalence, Heritability, Target, Intervention,
                                 Intervention_type, plot = TRUE, legend = TRUE,
                                 legend_position, ylim, ...){
  AF <- matrix("list", nrow = length(Target), ncol = length(Prevalence))
  Target_percent <- Target
  # Transform the proportion targeted into a quantile
  Target <- qnorm(Target, lower.tail = FALSE)
  for (j in 1:length(Prevalence)){
    prevalence <- Prevalence[j]
    for (i in 1:length(Target)){
      target <- Target[i]
      if (Intervention_type == "location")
        AF[i, j] <- Counterfactual_prev_target_location(p = prevalence, k = Intervention, b = target, h2 = Heritability)
      if (Intervention_type == "scale")
        AF[i, j] <- Counterfactual_prev_target_scale(p = prevalence, delta = Intervention, b = target, h2 = Heritability)
    }
  }
  AF <- t(AF)
  if (plot == TRUE){
    AF <- as.data.frame(AF)
    Target_percent_text <- probabilities(Target_percent)
    colnames(AF) <- Target_percent_text
    AF$Prevalence <- Prevalence
    plot_data <- melt(AF, id.var = "Prevalence")
    plot_data$value <- as.numeric(plot_data$value)
    colnames(plot_data) <- c("Prevalence", "Target_percent", "AF")
    ## Plot
    plot_prevalence <- ggplot(plot_data, aes(x = Prevalence, y = AF, group = Target_percent, colour = Target_percent)) +
      geom_line(aes(lty = Target_percent), size = 1) + geom_point(aes(shape = Target_percent), size = 2) + theme_bw() +
      theme(panel.border = element_blank(), panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),
            text = element_text(size = 17)) +
      labs(x = "Prevalence", colour = "Targeted", lty = "Targeted", shape = "Targeted")
    print(plot_prevalence)
    out <- list(AF = AF, plot_prevalence = plot_prevalence)
  } else {
    out <- list(AF = AF)
  }
  return(out)
}

############ compare = intervention
AF_prevalence_intervention <- function(Prevalence, Heritability, Target, Intervention,
                                       Intervention_type, plot = TRUE, legend = TRUE,
                                       legend_position, ylim, ...){
  AF <- matrix("list", nrow = length(Intervention), ncol = length(Prevalence))
  # Transform the proportion targeted into a quantile
  Target <- qnorm(Target, lower.tail = FALSE)
  for (j in 1:length(Prevalence)){
    prevalence <- Prevalence[j]
    for (i in 1:length(Intervention)){
      intervention <- Intervention[i]
      if (Intervention_type == "location")
        AF[i, j] <- Counterfactual_prev_target_location(p = prevalence, k = intervention, b = Target, h2 = Heritability)
      if (Intervention_type == "scale")
        AF[i, j] <- Counterfactual_prev_target_scale(p = prevalence, delta = intervention, b = Target, h2 = Heritability)
    }
  }
  AF <- t(AF)
  if (plot == TRUE){
    AF <- as.data.frame(AF)
    colnames(AF) <- Intervention
    AF$Prevalence <- Prevalence
    plot_data <- melt(AF, id.var = "Prevalence")
    plot_data$value <- as.numeric(plot_data$value)
    colnames(plot_data) <- c("Prevalence", "Intervention", "AF")
    ## Plot
    plot_prevalence <- ggplot(plot_data, aes(x = Prevalence, y = AF, group = Intervention, colour = Intervention)) +
      geom_line(aes(lty = Intervention), size = 1) + geom_point(aes(shape = Intervention), size = 2) + theme_bw() +
      theme(panel.border = element_blank(), panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),
            text = element_text(size = 17)) +
      labs(x = "Prevalence", colour = "Intervention", lty = "Intervention", shape = "Intervention")
    print(plot_prevalence)
    out <- list(AF = AF, plot_prevalence = plot_prevalence)
  } else {
    out <- list(AF = AF)
  }
  return(out)
}

####### xaxis = target
############ compare = heritability
AF_target_heritability <- function(Prevalence, Heritability, Target, Intervention,
                                   Intervention_type, plot = TRUE, legend = TRUE,
                                   legend_position, ylim, ...){
  AF <- matrix("list", nrow = length(Heritability), ncol = length(Target))
  Target_percent <- Target
  # Transform the proportion targeted into a quantile
  Target <- qnorm(Target, lower.tail = FALSE)
  for (j in 1:length(Target)){
    target <- Target[j]
    for (i in 1:length(Heritability)){
      heritability <- Heritability[i]
      if (Intervention_type == "location")
        AF[i, j] <- Counterfactual_prev_target_location(p = Prevalence, k = Intervention, b = target, h2 = heritability)
      if (Intervention_type == "scale")
        AF[i, j] <- Counterfactual_prev_target_scale(p = Prevalence, delta = Intervention, b = target, h2 = heritability)
    }
  }
  AF <- t(AF)
  if (plot == TRUE){
    AF <- as.data.frame(AF)
    Heritability_text <- probabilities(Heritability)
    colnames(AF) <- Heritability_text
    AF$Target_percent <- Target_percent
    plot_data <- melt(AF, id.var = "Target_percent")
    plot_data$value <- as.numeric(plot_data$value)
    colnames(plot_data) <- c("Target_percent", "Heritability", "AF")
    ## Plot
    plot_target <- ggplot(plot_data, aes(x = Target_percent, y = AF, group = Heritability, colour = Heritability)) +
      geom_line(aes(lty = Heritability), size = 1) + geom_point(aes(shape = Heritability)) + theme_bw() +
      theme(panel.border = element_blank(), panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),
            text = element_text(size = 17)) +
      labs(x = "Proportion targeted")
    print(plot_target)
    out <- list(AF = AF, plot_target = plot_target)
  } else {
    out <- list(AF = AF)
  }
  return(out)
}

############ compare = prevalence
AF_target_prevalence <- function(Prevalence, Heritability, Target, Intervention,
                                 Intervention_type, plot = TRUE, legend = TRUE,
                                 legend_position, ylim, ...){
  AF <- matrix("list", nrow = length(Prevalence), ncol = length(Target))
  Target_percent <- Target
  # Transform the proportion targeted into a quantile
  Target <- qnorm(Target, lower.tail = FALSE)
  for (j in 1:length(Target)){
    target <- Target[j]
    for (i in 1:length(Prevalence)){
      prevalence <- Prevalence[i]
      if (Intervention_type == "location")
        AF[i, j] <- Counterfactual_prev_target_location(p = prevalence, k = Intervention, b = target, h2 = Heritability)
      if (Intervention_type == "scale")
        AF[i, j] <- Counterfactual_prev_target_scale(p = prevalence, delta = Intervention, b = target, h2 = Heritability)
    }
  }
  AF <- t(AF)
  if (plot == TRUE){
    AF <- as.data.frame(AF)
    Prevalence_text <- probabilities(Prevalence)
    colnames(AF) <- Prevalence_text
    AF$Target_percent <- Target_percent
    plot_data <- melt(AF, id.var = "Target_percent")
    plot_data$value <- as.numeric(plot_data$value)
    colnames(plot_data) <- c("Target_percent", "Prevalence", "AF")
    ## Plot
    plot_target <- ggplot(plot_data, aes(x = Target_percent, y = AF, group = Prevalence, colour = Prevalence)) +
      geom_line(aes(lty = Prevalence), size = 1) + geom_point(aes(shape = Prevalence)) + theme_bw() +
      theme(panel.border = element_blank(), panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),
            text = element_text(size = 17)) +
      labs(x = "Proportion targeted")
    print(plot_target)
    out <- list(AF = AF, plot_target = plot_target)
  } else {
    out <- list(AF = AF)
  }
  return(out)
}

############ compare = intervention
AF_target_intervention <- function(Prevalence, Heritability, Target, Intervention,
                                   Intervention_type, plot = TRUE, legend = TRUE,
                                   legend_position, ylim, ...){
  AF <- matrix("list", nrow = length(Intervention), ncol = length(Target))
  Target_percent <- Target
  # Transform the proportion targeted into a quantile
  Target <- qnorm(Target, lower.tail = FALSE)
  for (j in 1:length(Target)){
    target <- Target[j]
    for (i in 1:length(Intervention)){
      intervention <- Intervention[i]
      if (Intervention_type == "location")
        AF[i, j] <- Counterfactual_prev_target_location(p = Prevalence, k = intervention, b = target, h2 = Heritability)
      if (Intervention_type == "scale")
        AF[i, j] <- Counterfactual_prev_target_scale(p = Prevalence, delta = intervention, b = target, h2 = Heritability)
    }
  }
  AF <- t(AF)
  if (plot == TRUE){
    AF <- as.data.frame(AF)
    colnames(AF) <- Intervention
    AF$Target_percent <- Target_percent
    plot_data <- melt(AF, id.var = "Target_percent")
    plot_data$value <- as.numeric(plot_data$value)
    colnames(plot_data) <- c("Target_percent", "Intervention", "AF")
    ## Plot
    plot_target <- ggplot(plot_data, aes(x = Target_percent, y = AF, group = Intervention, colour = Intervention)) +
      geom_line(aes(lty = Intervention), size = 1) + geom_point(aes(shape = Intervention)) + theme_bw() +
      theme(panel.border = element_blank(), panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),
            text = element_text(size = 17)) +
      labs(x = "Proportion targeted")
    print(plot_target)
    out <- list(AF = AF, plot_target = plot_target)
  } else {
    out <- list(AF = AF)
  }
  return(out)
}

######## xaxis = intervention
############ compare = heritability
AF_intervention_heritability <- function(Prevalence, Heritability, Target, Intervention,
                                         Intervention_type, plot = TRUE, legend = TRUE,
                                         legend_position, ylim, ...){
  AF <- matrix("list", nrow = length(Heritability), ncol = length(Intervention))
  # Transform the proportion targeted into a quantile
  Target <- qnorm(Target, lower.tail = FALSE)
  for (j in 1:length(Intervention)){
    intervention <- Intervention[j]
    for (i in 1:length(Heritability)){
      heritability <- Heritability[i]
      if (Intervention_type == "location")
        AF[i, j] <- Counterfactual_prev_target_location(p = Prevalence, k = intervention, b = Target, h2 = heritability)
      if (Intervention_type == "scale")
        AF[i, j] <- Counterfactual_prev_target_scale(p = Prevalence, delta = intervention, b = Target, h2 = heritability)
    }
  }
  AF <- t(AF)
  if (plot == TRUE){
    AF <- as.data.frame(AF)
    Heritability_text <- probabilities(Heritability)
    colnames(AF) <- Heritability_text
    AF$Intervention <- Intervention
    plot_data <- melt(AF, id.var = "Intervention")
    plot_data$value <- as.numeric(plot_data$value)
    colnames(plot_data) <- c("Intervention", "Heritability", "AF")
    ## Plot
    plot_intervention <- ggplot(plot_data, aes(x = Intervention, y = AF, group = Heritability, colour = Heritability)) +
      geom_line(aes(lty = Heritability), size = 1) + geom_point(aes(shape = Heritability)) + theme_bw() +
      theme(panel.border = element_blank(), panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),
            text = element_text(size = 17)) +
      labs(x = "Intervention effect (k)")
    print(plot_intervention)
    out <- list(AF = AF, plot_intervention = plot_intervention)
  } else {
    out <- list(AF = AF)
  }
  return(out)
}

############ compare = prevalence
AF_intervention_prevalence <- function(Prevalence, Heritability, Target, Intervention,
                                       Intervention_type, plot = TRUE, legend = TRUE,
                                       legend_position, ylim, ...){
  AF <- matrix("list", nrow = length(Prevalence), ncol = length(Intervention))
  # Transform the proportion targeted into a quantile
  Target <- qnorm(Target, lower.tail = FALSE)
  for (j in 1:length(Intervention)){
    intervention <- Intervention[j]
    for (i in 1:length(Prevalence)){
      prevalence <- Prevalence[i]
      if (Intervention_type == "location")
        AF[i, j] <- Counterfactual_prev_target_location(p = prevalence, k = intervention, b = Target, h2 = Heritability)
      if (Intervention_type == "scale")
        AF[i, j] <- Counterfactual_prev_target_scale(p = prevalence, delta = intervention, b = Target, h2 = Heritability)
    }
  }
  AF <- t(AF)
  if (plot == TRUE){
    AF <- as.data.frame(AF)
    Prevalence_text <- probabilities(Prevalence)
    colnames(AF) <- Prevalence_text
    AF$Intervention <- Intervention
    plot_data <- melt(AF, id.var = "Intervention")
    plot_data$value <- as.numeric(plot_data$value)
    colnames(plot_data) <- c("Intervention", "Prevalence", "AF")
    ## Plot
    plot_intervention <- ggplot(plot_data, aes(x = Intervention, y = AF, group = Prevalence, colour = Prevalence)) +
      geom_line(aes(lty = Prevalence), size = 1) + geom_point(aes(shape = Prevalence)) + theme_bw() +
      theme(panel.border = element_blank(), panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),
            text = element_text(size = 17)) +
      labs(x = "Intervention effect (k)")
    print(plot_intervention)
    out <- list(AF = AF, plot_intervention = plot_intervention)
  } else {
    out <- list(AF = AF)
  }
  return(out)
}

############ compare = target
AF_intervention_target <- function(Prevalence, Heritability, Target, Intervention,
                                   Intervention_type, plot = TRUE, legend = TRUE,
                                   Disease, legend_position, ylim, ...){
  AF <- matrix("list", nrow = length(Target), ncol = length(Intervention))
  Target_percent <- Target
  # Transform the proportion targeted into a quantile
  Target <- qnorm(Target, lower.tail = FALSE)
  for (j in 1:length(Intervention)){
    intervention <- Intervention[j]
    for (i in 1:length(Target)){
      target <- Target[i]
      if (Intervention_type == "location")
        AF[i, j] <- Counterfactual_prev_target_location(p = Prevalence, k = intervention, b = target, h2 = Heritability)
      if (Intervention_type == "scale")
        AF[i, j] <- Counterfactual_prev_target_scale(p = Prevalence, delta = intervention, b = target, h2 = Heritability)
    }
  }
  AF <- t(AF)
  if (plot == TRUE){
    AF <- as.data.frame(AF)
    Target_percent_text <- probabilities(Target_percent)
    colnames(AF) <- Target_percent_text
    AF$Intervention <- Intervention
    plot_data <- melt(AF, id.var = "Intervention")
    plot_data$value <- as.numeric(plot_data$value)
    colnames(plot_data) <- c("Intervention", "Target_percent", "AF")
    ## Plot
    plot_intervention <- ggplot(plot_data, aes(x = Intervention, y = AF, group = Target_percent, colour = Target_percent)) +
      geom_line(aes(linetype = Target_percent), size = 1) + geom_point(aes(shape = Target_percent), size = 2) + theme_bw() +
      theme(panel.border = element_blank(), panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),
            text = element_text(size = 17)) +
      labs(x = "Intervention effect (k)", colour = "Targeted", lty = "Targeted", shape = "Targeted")
    print(plot_intervention)
    out <- list(AF = AF, plot_intervention = plot_intervention)
  } else {
    out <- list(AF = AF)
  }
  return(out)
}
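## ---------------------------------------------------------------------------
## Usage sketch (not part of the package source). On the liability scale the
## functions above compute AF = (p_target - p_target_delta) / p, i.e. the share
## of the disease prevalence p that would be removed by intervening on the
## proportion at highest genetic risk. With plot = FALSE only mvtnorm (for
## pmvnorm) is needed; the plotting path additionally assumes reshape2 (melt)
## and ggplot2 are attached, as in the package imports.
library(mvtnorm)
res <- AF_heritability_prevalence(Prevalence = c(0.02, 0.1),
                                  Heritability = seq(0.1, 0.9, by = 0.2),
                                  Target = 0.01, Intervention = 1,
                                  Intervention_type = "location", plot = FALSE)
res$AF  # heritability in rows, prevalence in columns; values are stored as
        # character strings because matrix("list", ...) creates a character matrix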
## File: AFheritability/R/AFheritability_functions.R
#' @title The shiny application \code{AFheritability} is a user interface for the function \code{\link[AFheritability]{AFfunction}}
#' @description The shiny app provides a user-friendly interface for the function \code{\link[AFheritability]{AFfunction}}.
#' @author Elisabeth Dahlqwist
#' @details By running \code{runShinyApp()} a user interface for the function
#'   \code{\link[AFheritability]{AFfunction}} is started in RStudio. The app is also available online at
#'   \url{https://afheritability.shinyapps.io/afheritability/} (note that the app is usually faster in the
#'   web browsers Google Chrome or Firefox).
#' @references Dahlqwist E et al. (2019) <doi:10.1007/s00439-019-02006-8>.
#' @import shiny
#' @export
runShinyApp <- function() {
  appDir <- system.file("shiny-examples", "AFheritability_shiny", package = "AFheritability")
  if (appDir == "") {
    stop("Could not find example directory. Try re-installing `AFheritability`.", call. = FALSE)
  }
  shiny::runApp(appDir, display.mode = "normal")
}
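## Usage sketch (assumption: the package is installed so that system.file()
## can locate the bundled app). runApp() blocks until the app is closed.
if (interactive()) {
  runShinyApp()
}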
## File: AFheritability/R/runShinyApp.R
########### Functions for shiny app

alternatives <- data.frame(
  xaxis = c("Heritability", "Heritability", "Heritability",
            "Prevalence", "Prevalence", "Prevalence",
            "Target", "Target", "Target",
            "Intervention", "Intervention", "Intervention"),
  compare = c("Prevalence", "Target", "Intervention",
              "Heritability", "Target", "Intervention",
              "Heritability", "Prevalence", "Intervention",
              "Heritability", "Prevalence", "Target"),
  row.names = NULL, stringsAsFactors = FALSE)

value_maker <- function(xaxis, compare){
  if (xaxis == "Heritability" && compare == "Prevalence") {
    H_value <- c(0, 1); P_value <- c(0.02, 0.1); T_value <- 0.01; I_value <- 1
  }
  if (xaxis == "Heritability" && compare == "Target") {
    H_value <- c(0, 1); P_value <- 0.5; T_value <- c(0.01, 0.3); I_value <- 1
  }
  if (xaxis == "Heritability" && compare == "Intervention") {
    H_value <- c(0, 1); P_value <- 0.3; T_value <- 0.05; I_value <- c(1, 5)
  }
  if (xaxis == "Prevalence" && compare == "Heritability") {
    H_value <- c(0.2, 0.6); P_value <- c(0, 1); T_value <- 0.05; I_value <- 1
  }
  if (xaxis == "Prevalence" && compare == "Target") {
    H_value <- 0.3; P_value <- c(0, 1); T_value <- c(0.05, 0.1); I_value <- 1
  }
  if (xaxis == "Prevalence" && compare == "Intervention") {
    H_value <- 0.3; P_value <- c(0, 1); T_value <- 0.05; I_value <- c(1, 5)
  }
  if (xaxis == "Target" && compare == "Heritability") {
    H_value <- c(0.2, 0.6); P_value <- 0.3; T_value <- c(0, 1); I_value <- 1
  }
  if (xaxis == "Target" && compare == "Prevalence") {
    H_value <- 0.5; P_value <- c(0.00001, 0.3); T_value <- c(0, 1); I_value <- 1
  }
  if (xaxis == "Target" && compare == "Intervention") {
    H_value <- 0.5; P_value <- 0.3; T_value <- c(0, 1); I_value <- c(1, 3)
  }
  if (xaxis == "Intervention" && compare == "Heritability") {
    H_value <- c(0.2, 0.6); P_value <- 0.3; T_value <- 0.05; I_value <- c(0, 10)
  }
  if (xaxis == "Intervention" && compare == "Prevalence") {
    H_value <- 0.5; P_value <- c(0.00001, 0.3); T_value <- 0.05; I_value <- c(0, 10)
  }
  if (xaxis == "Intervention" && compare == "Target") {
    H_value <- 0.3; P_value <- 0.08; T_value <- c(0.003, 0.05); I_value <- c(0, 10)
  }
  value <- list(H_value = H_value, P_value = P_value,
                T_value = T_value, I_value = I_value)
  return(value)
}
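## Illustration (not in the original file): value_maker() returns the default
## slider settings for a given x-axis / comparison pair; a length-2 element
## becomes a range slider in the UI, a scalar a single-valued slider.
str(value_maker(xaxis = "Heritability", compare = "Prevalence"))
# List of 4
#  $ H_value: num [1:2] 0 1
#  $ P_value: num [1:2] 0.02 0.1
#  $ T_value: num 0.01
#  $ I_value: num 1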
## File: AFheritability/inst/shiny-examples/AFheritability_shiny/global.R
library(shiny)

shinyServer(function(input, output, session) {

  observeEvent(input$xaxis,
               updateSelectInput(session, "compare", "Show at several different values of:",
                                 choices = alternatives$compare[alternatives$xaxis == input$xaxis]))

  output$Heritability_slider <- renderUI({
    values <- value_maker(input$xaxis, input$compare)
    if (length(values$H_value) == 1)
      sliderInput("Heritability", "Heritability", min = 0, max = 1,
                  value = values$H_value, step = 0.0001)
    else
      sliderInput("Heritability", "Heritability", min = 0, max = 1,
                  value = c(values$H_value[1], values$H_value[2]), step = 0.0001)
  })

  output$Prevalence_slider <- renderUI({
    values <- value_maker(input$xaxis, input$compare)
    if (length(values$P_value) == 1)
      sliderInput("Prevalence", "Prevalence", min = 0, max = 1,
                  value = values$P_value, step = 0.000001)
    else
      sliderInput("Prevalence", "Prevalence", min = 0, max = 1,
                  value = c(values$P_value[1], values$P_value[2]), step = 0.000001)
  })

  output$Target_slider <- renderUI({
    values <- value_maker(input$xaxis, input$compare)
    if (length(values$T_value) == 1)
      sliderInput("Target", "Proportion at highest genetic risk that are targeted by the intervention",
                  min = 0, max = 1, value = values$T_value, step = 0.0001)
    else
      sliderInput("Target", "Proportion at highest genetic risk that are targeted by the intervention",
                  min = 0, max = 1, value = c(values$T_value[1], values$T_value[2]), step = 0.0001)
  })

  output$Intervention_slider <- renderUI({
    values <- value_maker(input$xaxis, input$compare)
    if (length(values$I_value) == 1)
      sliderInput("Intervention", "Intervention effect (i.e. standardized reduction in mean genetic risk)",
                  min = 0, max = 10, value = values$I_value, step = 0.1)
    else
      sliderInput("Intervention", "Intervention effect (i.e. standardized reduction in mean genetic risk)",
                  min = 0, max = 10, value = c(values$I_value[1], values$I_value[2]), step = 0.1)
  })

  output$AFfunction <- renderPlot({
    Prev <- input$Prevalence
    Her <- input$Heritability
    Tar <- input$Target
    Inter <- input$Intervention
    if (length(Her) > 1) {
      Her <- seq(min(Her), max(Her), by = (max(Her) - min(Her)) / 10)
      if (input$compare == "Heritability")
        Her <- quantile(Her, probs = c(0, 0.25, 0.5, 0.75, 1), names = FALSE)
    }
    if (length(Prev) > 1) {
      Prev <- seq(min(Prev), max(Prev), by = (max(Prev) - min(Prev)) / 10)
      if (input$compare == "Prevalence")
        Prev <- quantile(Prev, probs = c(0, 0.25, 0.5, 0.75, 1), names = FALSE)
    }
    if (length(Tar) > 1) {
      Tar <- seq(min(Tar), max(Tar), by = (max(Tar) - min(Tar)) / 10)
      if (input$compare == "Target")
        Tar <- quantile(Tar, probs = c(0, 0.25, 0.5, 0.75, 1), names = FALSE)
    }
    if (length(Inter) > 1) {
      Inter <- seq(min(Inter), max(Inter), by = (max(Inter) - min(Inter)) / 10)
      if (input$compare == "Intervention")
        Inter <- quantile(Inter, probs = c(0, 0.25, 0.5, 0.75, 1), names = FALSE)
    }
    AFfunction(Prevalence = Prev, Heritability = Her, Target = Tar, Intervention = Inter,
               xaxis = input$xaxis, compare = input$compare, yaxis = c(0, 0.16))
  })
})
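## Illustration (not part of the app): how a two-valued slider range is
## expanded into an 11-point grid and, when it is the 'compare' variable,
## thinned to the five percentiles drawn in the plot legend.
rng <- c(0.2, 0.6)  # e.g. a Heritability range chosen on the slider
grd <- seq(min(rng), max(rng), by = (max(rng) - min(rng)) / 10)
quantile(grd, probs = c(0, 0.25, 0.5, 0.75, 1), names = FALSE)
# 0.20 0.30 0.40 0.50 0.60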
## File: AFheritability/inst/shiny-examples/AFheritability_shiny/server.r
library(shiny)

shinyUI(fluidPage(
  # Application title
  headerPanel("The attributable fraction and the heritability"),
  sidebarLayout(
    sidebarPanel(
      selectInput("xaxis", "Choose x-axis", choices = unique(alternatives$xaxis)),
      selectInput("compare", "Show at several different values of:", choices = "", selected = ""),
      uiOutput("Heritability_slider"),
      uiOutput("Prevalence_slider"),
      uiOutput("Target_slider"),
      uiOutput("Intervention_slider")
    ),
    # Show a plot of the AF and heritability
    mainPanel(
      h5("This app shows how the attributable fraction (AF) can be expressed as a function of the heritability, disease prevalence, target group size and intervention effect. For more information read 'On the relationship between the attributable fraction and the heritability' (Dahlqwist et al.)."),
      h1("Plot"),
      tags$style(type = "text/css",
                 ".shiny-output-error { visibility: hidden; }",
                 ".shiny-output-error:before { visibility: hidden; }"),
      plotOutput("AFfunction"),
      h5("Note!"),
      h6("The lines represent the 0%, 25%, 50%, 75% and 100% percentiles of the range of the variable chosen from the tab 'Show at several different values of'."),
      helpText(a("Code is available here", href = "https://github.com/ElisabethDahlqwist/AFheritability"))
    )
  )
))
## File: AFheritability/inst/shiny-examples/AFheritability_shiny/ui.R
#'@importFrom stats approx coef dnorm fitted lm pnorm qnorm qqnorm resid spline
#'@importFrom grDevices gray
#'@importFrom graphics Axis abline axis box co.intervals grid lines mtext par plot plot.new plot.window points rect text
#'@importFrom utils packageDescription
#'@importFrom gamlss.dist pBCCG pBCPE pBCT qBCCG qBCPE qBCT
#'@importFrom gamlss gamlss gamlss.control is.gamlss predictAll
NULL

#'Growth of Dutch boys
#'
#'Height, weight, head circumference and puberty of 7482 Dutch boys.
#'
#'The complete sample of cross-sectional data from boys 0-21 years used to
#'construct the Dutch growth references 1997. Variables \code{gen} and
#'\code{phb} are ordered factors. \code{reg} is a factor. Note: A 10\% sample
#'from this data is available in data set \code{boys} in the \code{mice}
#'package.
#'
#'@name boys7482
#'@docType data
#'@format A data frame with 7482 rows on the following 9 variables:
#'\describe{
#'\item{age}{Decimal age (0-21 years)}
#'\item{hgt}{Height (cm)}
#'\item{wgt}{Weight (kg)}
#'\item{bmi}{Body mass index}
#'\item{hc}{Head circumference (cm)}
#'\item{gen}{Genital Tanner stage (G1-G5)}
#'\item{phb}{Pubic hair (Tanner P1-P6)}
#'\item{tv}{Testicular volume (ml)}
#'\item{reg}{Region (north, east, west, south, city)}
#'}
#'@author Stef van Buuren, 2012
#'@source Fredriks, A.M., van Buuren, S., Burgmeijer, R.J., Meulmeester, J.F.,
#'Beuker, R.J., Brugman, E., Roede, M.J., Verloove-Vanhorick, S.P., Wit, J.M.
#'(2000) Continuing positive secular growth change in The Netherlands
#'1955-1997. \emph{Pediatric Research}, \bold{47}, 316-323.
#'
#'Fredriks, A.M., van Buuren, S., Wit, J.M., Verloove-Vanhorick, S.P. (2000).
#'Body index measurements in 1996-7 compared with 1980. \emph{Archives of
#'Disease in Childhood}, \bold{82}, 107-112.
#'@keywords datasets
NULL

#'Reference tables from CDC 2000
#'
#'Reference tables from CDC 2000.
#'
#'The models were fitted by the LMS model. Parameters are stored as type
#'\code{LMS}. Tabulated values are point ages.
#'
#'The naming conventions are as follows: \describe{
#'\item{cdc.hgt}{Combined length/height (cm) for Age, 0-20 years. Measures
#'<2 years apply to length (lying), while ages >= 2 years apply to height,
#'or stature (standing).}
#'\item{cdc.wgt}{Weight (kg) for Age, 0-20 years.}
#'\item{cdc.bmi}{Body Mass Index (kg/m2) for Age, 2-20 years.}}
#'
#'@name References CDC
#'@aliases cdc.hgt cdc.wgt cdc.bmi
#'@docType data
#'@format A data frame with seven variables: \describe{
#'\item{pop}{Study Population}
#'\item{sub}{Subpopulation}
#'\item{sex}{Sex (M, F)}
#'\item{x}{Decimal age (0-20 years)}
#'\item{L}{Lambda (skewness) curve}
#'\item{M}{Median curve}
#'\item{S}{Coefficient of Variation curve}
#'}
#'@seealso \code{\link{nl4.wgt}}, \code{\link{nl4.hgt}}, \code{\link{nl4.bmi}},
#'\code{\link{who.wgt}}
#'@source Kuczmarski, R.J., Ogden, C.L., Guo, S.S., Grummer-Strawn, L.M.,
#'Flegal, K.M., Mei, Z., Wei, R., Curtin, L.R., Roche, A.F., Johnson, C.L.
#'2000 CDC growth charts for the United States: methods and development.
#'\emph{Vital Health Stat}, 2002, \bold{11}, 246, 1-190.
#'@keywords datasets
NULL

#'Reference tables from Third Dutch Growth Study 1980
#'
#'Reference table from the Third Dutch Growth Study 1980.
#'
#'The model was fitted by the LMS model. Parameters are stored as type
#'\code{LMS}. Tabulated values are point ages.
#'
#'The naming conventions are as follows: \describe{
#'\item{nl3.bmi}{Body Mass Index (kg/m2) for Age}}
#'
#'@name References NL3
#'@aliases nl3.bmi
#'@docType data
#'@format A data frame with seven variables: \describe{
#'\item{pop}{Study Population}
#'\item{sub}{Subpopulation, e.g. ethnicity or age group}
#'\item{sex}{Sex (M, F)}
#'\item{x}{Decimal age (0-21 years)}
#'\item{L}{Lambda (skewness) curve}
#'\item{M}{Median curve}
#'\item{S}{Coefficient of Variation curve}
#'}
#'@seealso \code{\link{cdc.wgt}}, \code{\link{who.wgt}}
#'@source Fredriks, A.M., van Buuren, S., Burgmeijer, R.J., Meulmeester, J.F.,
#'Beuker, R.J., Brugman, E., Roede, M.J., Verloove-Vanhorick, S.P., Wit, J.M.
#'(2000) Continuing positive secular growth change in The Netherlands
#'1955-1997. \emph{Pediatric Research}, \bold{47}, 316-323.
#'
#'Fredriks, A.M., van Buuren, S., Wit, J.M., Verloove-Vanhorick, S.P. (2000).
#'Body index measurements in 1996-7 compared with 1980. \emph{Archives of
#'Disease in Childhood}, \bold{82}, 107-112.
#'@keywords datasets
NULL

#'Reference tables from Fourth Dutch Growth Study 1997
#'
#'Reference table from the Fourth Dutch Growth Study 1997.
#'
#'The model was fitted by the LMS model. Parameters are stored as type
#'\code{LMS}. Tabulated values are point ages.
#'
#'Height follows a normal distribution, with all lambda parameters set equal to
#'1. The standard deviation (in cm) is obtained as \code{S*M}.
#'
#'The naming conventions are as follows: \describe{
#'\item{nl4.hgt}{Length/Height (cm) for Age}
#'\item{nl4.wgt}{Weight (kg) for Age}
#'\item{nl4.wfh}{Weight (kg) for Height (cm)}
#'\item{nl4.bmi}{Body Mass Index (kg/m2) for Age}
#'\item{nl4.hdc}{Head circumference (cm) for Age}
#'\item{nl4.lgl}{Leg Length (cm) for Age}
#'\item{nl4.hip}{Hip circumference (cm) for Age}
#'\item{nl4.wst}{Waist circumference (cm) for Age}
#'\item{nl4.whr}{Waist/Hip ratio for Age}
#'\item{nl4.sit}{Sitting Height for Age}
#'\item{nl4.shh}{Sitting Height/Height ratio for Age}}
#'
#'@name References NL4
#'@aliases nl4.hgt nl4.wgt nl4.wfh nl4.bmi nl4.hdc nl4.lgl nl4.hip nl4.wst
#'nl4.whr nl4.sit nl4.shh
#'@docType data
#'@format A data frame with seven variables: \describe{
#'\item{pop}{Study Population}
#'\item{sub}{Subpopulation, e.g. ethnicity or age group (for \code{nl4.wfh})}
#'\item{sex}{Sex (M, F)}
#'\item{x}{Decimal age (0-21 years) or Height (for \code{nl4.wfh})}
#'\item{L}{Lambda (skewness) curve}
#'\item{M}{Median curve}
#'\item{S}{Coefficient of Variation curve}
#'}
#'@seealso \code{\link{cdc.wgt}}, \code{\link{who.wgt}}
#'@source Fredriks, A.M., van Buuren, S., Burgmeijer, R.J., Meulmeester, J.F.,
#'Beuker, R.J., Brugman, E., Roede, M.J., Verloove-Vanhorick, S.P., Wit, J.M.
#'(2000) Continuing positive secular growth change in The Netherlands
#'1955-1997. \emph{Pediatric Research}, \bold{47}, 316-323.
#'
#'Fredriks, A.M., van Buuren, S., Wit, J.M., Verloove-Vanhorick, S.P. (2000).
#'Body index measurements in 1996-7 compared with 1980. \emph{Archives of
#'Disease in Childhood}, \bold{82}, 107-112.
#'
#'@keywords datasets
NULL

#'References WHO
#'
#'Reference tables, combined from the WHO Multicentre Growth Reference
#'Study (MGRS) (ages 0-5 years) and the WHO 2007 reference (5-19 years).
#'
#'The data were fitted by the LMS model. Parameters are stored as type
#'\code{LMS}. Tabulated values are point ages.
#'
#'The naming conventions are as follows:
#'\describe{
#'\item{who.hgt}{Length (cm, 0-2 years) or height (cm, 2-19 years)}
#'\item{who.wgt}{Weight (kg) for age (0-10 years)}
#'\item{who.bmi}{BMI (kg/m^2) for age (0-19 years)}
#'\item{who.hdc}{Head circumference (cm) for age (0-5 years)}
#'\item{who.wfh}{Weight (kg) for height (65-120 cm)}
#'\item{who.wfl}{Weight (kg) for length (45-110 cm)}
#'}
#'
#'@name References WHO
#'@aliases who.wgt who.hgt who.bmi who.wfh who.wfl who.hdc
#'@docType data
#'@format A data frame with seven variables:
#'\describe{
#'\item{pop}{Study Population (always \code{"who"})}
#'\item{sub}{Subpopulation (always \code{"N"})}
#'\item{sex}{Sex (M, F)}
#'\item{x}{Decimal age, height (cm) or length (cm)}
#'\item{L}{Lambda (skewness) curve}
#'\item{M}{Median curve}
#'\item{S}{Coefficient of variation}
#'}
#'@seealso \code{\link{nl4.wgt}}, \code{\link{cdc.wgt}},
#'\url{http://www.who.int/childgrowth/mgrs/en/},
#'\url{http://www.who.int/growthref/en/}
#'@source
#'WHO Multicentre Growth Reference Study Group. WHO Child Growth
#'Standards based on length/height, weight and age. \emph{Acta Paediatr},
#'Suppl. 2006, 450, 76-85.
#'
#'de Onis, M., Onyango, A.W., Borghi, E., Siyam, A., Nishida, C., Siekmann, J.
#'Development of a WHO growth reference for school-aged children and adolescents.
#'\emph{Bulletin of the World Health Organization}, 2007;85:660-7.
#'@keywords datasets
NULL
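## Quick look at the bundled reference tables (illustrative sketch; assumes
## the AGD package is installed so the lazy-loaded data sets are available).
library(AGD)
head(nl4.hgt)                       # columns pop, sub, sex, x, L, M, S
range(boys7482$age, na.rm = TRUE)   # decimal ages of the 7482 Dutch boys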
## File: AGD/R/AGD-package.R
# agd.r
#
# Tools for course Analysis of Growth Data
### SvB 20dec2014

#'Convert standard deviation scores (SDS) to measurements
#'
#'Converts standard deviation score (SDS) into measurements using
#'an age- and sex-conditional external reference.
#'
#'Functions \code{z2y()} and \code{y2z()} are the inverse of each other.
#'
#'The argument \code{dist} determines the statistical distribution. The
#'possibilities are as follows: \describe{
#'\item{"NO"}{\code{ref} should contain columns \code{mean} and \code{sd},
#'containing the mean and the standard deviation in the external reference
#'population.}
#'\item{"LMS"}{\code{ref} should contain columns \code{L}, \code{S} and
#'\code{M} containing the LMS parameters.}
#'\item{"BCCG"}{\code{ref} should contain columns \code{mu}, \code{sigma} and
#'\code{nu} containing the Box-Cox Cole-Green parameters.}
#'\item{"BCPE"}{\code{ref} should contain columns \code{mu}, \code{sigma},
#'\code{nu} and \code{tau} containing the Box-Cox Power Exponential
#'parameters.}
#'\item{"BCT"}{\code{ref} should contain columns \code{mu}, \code{sigma},
#'\code{nu} and \code{tau} containing the Box-Cox T distribution parameters.} }
#'
#'@aliases z2y
#'@param z A numerical vector containing standard deviation scores that are to
#'be converted. The length \code{length(z)} determines the size of the output
#'vector.
#'@param x A vector containing the values of the numerical covariate (typically
#'decimal age or height) at which conversion is desired. Values are replicated
#'to match \code{length(z)}.
#'@param sex A character vector indicating whether the male (\code{"M"}) or
#'female (\code{"F"}) reference should be used. Values are replicated to match
#'\code{length(z)}.
#'@param sub A character vector indicating the level of the \code{sub} field of
#'the reference standard defined in \code{ref}.
#'@param ref A data frame containing a factor \code{sex}, a numerical variable
#'\code{x} containing the tabulated decimal point ages, and two or more
#'numerical variables with reference values. See details.
#'@param dist A string identifying the type of distribution. Valid values are:
#'\code{"NO"}, \code{"BCCG"}, \code{"LMS"}, \code{"BCPE"} and \code{"BCT"}.
#'The default is \code{"LMS"}.
#'@param dec A scalar value indicating the number of decimals used to round the
#'value.
#'@param sex.fallback The level of the \code{sex} field used when no match is
#'found. The default \code{sex.fallback=NA} specifies that
#'unmatched entries should receive a \code{NA} value.
#'@param sub.fallback The level of the \code{sub} field used when no match is
#'found. The default \code{sub.fallback=NA} specifies that
#'unmatched entries should receive a \code{NA} value.
#'@return For \code{y2z()}: A vector with \code{length(y)} elements containing
#'the standard deviation score. For \code{z2y()}: A vector with
#'\code{length(z)} elements containing quantiles.
#'@author Stef van Buuren, 2010
#'@seealso \code{\link{y2z}}
#'@keywords distribution
#'@examples
#'
#'
#'boys <- boys7482
#'
#'# quantile at SD=0 of age 2 years,
#'# height Dutch boys
#'z2y(z=0, x=2)
#'
#'# same for Dutch girls
#'z2y(z=0, x=2, sex="F")
#'
#'# quantile at SD=c(-1,0,1) of age 2 years, BMI Dutch boys
#'z2y(z=c(-1,0,+1), x=2, ref=nl4.bmi)
#'
#'# 0SD line (P50) in kg of weight for age in 5-10 year, Dutch boys
#'z2y(z=rep(0,6), x=5:10, ref=nl4.wgt)
#'
#'# 95th percentile (P95), age 10 years, wfa, Dutch boys
#'z2y(z=qnorm(0.95), x=10, ref=nl4.wgt)
#'
#'# table of P3, P10, P50, P90, P97 of weight for 5-10 year old Dutch boys
#'# age per year
#'age <- 5:10
#'p <- c(0.03,0.1,0.5,0.9,0.97)
#'z <- rep(qnorm(p), length(age))
#'x <- rep(age, each=length(p))
#'w <- matrix(z2y(z, x=x, sex="M", ref=nl4.wgt), ncol=length(p),
#'            byrow=TRUE)
#'dimnames(w) <- list(age, p)
#'round(w,1)
#'
#'# standard set of Z-scores of weight for all tabulated ages, boys & girls
#'# and three ethnicities
#'sds <- c(-2.5, -2, -1, 0, 1, 2, 2.5)
#'age <- nl4.wgt$x
#'z <- rep(sds, times=length(age))
#'x <- rep(age, each=length(sds))
#'sex <- rep(c("M","F"), each=length(z)/2)
#'w <- z2y(z=z, x=x, sex=sex, ref=nl4.wgt)
#'w <- matrix(w, ncol=length(sds), byrow=TRUE)
#'dimnames(w) <- list(age, sds)
#'data.frame(sub=nl4.wgt$sub, sex=nl4.wgt$sex, round(w,2), row.names=NULL)
#'
#'# P85 of BMI in 5-8 year old Dutch boys and girls
#'e <- expand.grid(age=5:8, sex=c("M","F"))
#'w <- z2y(z=rep(qnorm(0.85),nrow(e)), x=e$age, sex=e$sex, ref=nl4.bmi)
#'w <- matrix(w, nrow=2, byrow=TRUE)
#'dimnames(w) <- list(c("boys","girls"),5:8)
#'w
#'
#'# data transformation of height z-scores to cm-scale
#'z <- c(-1.83, 0.09, 2.33, 0.81, -1.20)
#'x <- c(8.33, 0.23, 19.2, 24.3, 10)
#'sex <- c("M", "M", "F", "M", "F")
#'round(z2y(z=z, x=x, sex=sex, ref=nl4.hgt), 1)
#'
#'# interpolate published height standard
#'# to daily values, days 0-31, boys
#'# on centiles -2SD, 0SD and +2SD
#'days <- 0:31
#'sds <- c(-2, 0, +2)
#'z <- rep(sds, length(days))
#'x <- rep(round(days/365.25,4), each=length(sds))
#'w <- z2y(z, x, sex="M", ref=nl4.hgt)
#'w <- matrix(w, ncol=length(sds), byrow=TRUE)
#'dimnames(w) <- list(days, sds)
#'w
#'
#'@export
z2y <- function(z = c(-2, 0, 2), x = 1, sex = "M", sub = "N",
                ref = get("nl4.hgt"), dist = "LMS", dec = 3,
                sex.fallback = NA, sub.fallback = NA) {

  z2y.grp <- function(z, x, ref, dist = "LMS") {
    if (dist == "NO") {
      check.names(df = ref, needed = c("x", "mean", "sd"))
      mean <- approx(x = ref[, "x"], y = ref[, "mean"], xout = x)$y
      sd <- approx(x = ref[, "x"], y = ref[, "sd"], xout = x)$y
      return(mean + z * sd)
    }
    if (dist == "LMS") {
      check.names(df = ref, needed = c("x", "L", "M", "S"))
      L <- approx(x = ref[, "x"], y = ref[, "L"], xout = x)$y
      M <- approx(x = ref[, "x"], y = ref[, "M"], xout = x)$y
      S <- approx(x = ref[, "x"], y = ref[, "S"], xout = x)$y
      return(ifelse(L > 0.01 | L < (-0.01), M * (1 + L * S * z)^(1/L), M * exp(S * z)))
    }
    if (dist == "BCCG") {
      check.names(df = ref, needed = c("x", "nu", "mu", "sigma"))
      nu <- approx(x = ref[, "x"], y = ref[, "nu"], xout = x)$y
      mu <- approx(x = ref[, "x"], y = ref[, "mu"], xout = x)$y
      sigma <- approx(x = ref[, "x"], y = ref[, "sigma"], xout = x)$y
      return(qBCCG(pnorm(z), mu = mu, sigma = sigma, nu = nu))
    }
    if (dist == "BCPE") {
      check.names(df = ref, needed = c("x", "nu", "mu", "sigma", "tau"))
      mu <- approx(x = ref[, "x"], y = ref[, "mu"], xout = x)$y
      sigma <- approx(x = ref[, "x"], y = ref[, "sigma"], xout = x)$y
      nu <- approx(x = ref[, "x"], y = ref[, "nu"], xout = x)$y
      tau <- approx(x = ref[, "x"], y = ref[, "tau"], xout = x)$y
      return(qBCPE(pnorm(z), mu = mu, sigma = sigma, nu = nu, tau = tau))
    }
    if (dist == "BCT") {
      check.names(df = ref, needed = c("x", "nu", "mu", "sigma", "tau"))
      mu <- approx(x = ref[, "x"], y = ref[, "mu"], xout = x)$y
      sigma <- approx(x = ref[, "x"], y = ref[, "sigma"], xout = x)$y
      nu <- approx(x = ref[, "x"], y = ref[, "nu"], xout = x)$y
      tau <- approx(x = ref[, "x"], y = ref[, "tau"], xout = x)$y
      return(qBCT(pnorm(z), mu = mu, sigma = sigma, nu = nu, tau = tau))
    }
    stop(paste("Reference type", dist, "not implemented."))
  }

  if (!is.data.frame(ref)) stop("'ref' should be a data frame.")
  n <- length(z)
  if (n < 1) stop("'z' must have 1 or more values")
  if (!is.vector(z)) return(as.numeric(rep(NA, n)))
  if (!is.numeric(z)) return(as.numeric(rep(NA, n)))
  x <- rep(x, length.out = length(z))
  sex <- rep(sex, length.out = length(z))
  sub <- rep(sub, length.out = length(z))
  dist <- match.arg(dist, choices = c("NO", "LMS", "BCCG", "BCPE", "BCT"))

  # available levels in ref: sex, sub
  lev.sex <- levels(ref$sex[, drop = TRUE])
  lev.sub <- levels(ref$sub[, drop = TRUE])

  # replace nonmatching levels
  idx <- is.na(match(sub, lev.sub))
  if (any(idx)) {
    sub[idx] <- sub.fallback
    if (!is.na(sub.fallback))
      warning("Entries (n=", sum(idx), ") replaced by '", sub.fallback, "'", sep = "")
  }
  idx <- is.na(match(sex, lev.sex))
  if (any(idx)) {
    sex[idx] <- sex.fallback
    if (!is.na(sex.fallback))
      warning("Entries (n=", sum(idx), ") replaced by '", sex.fallback, "'", sep = "")
  }

  refs <- with(ref, split(ref, f = list(sub, sex)))
  xs <- split(x, list(sub, sex))
  zs <- split(z, list(sub, sex))
  ys <- vector("list", length(zs))
  names(ys) <- names(zs)
  for (i in 1:length(zs)) {
    name <- names(zs)[i]
    if (is.null(refs[[name]]))
      ys[[name]] <- rep(NA, length = length(zs[[name]]))
    else
      ys[[name]] <- z2y.grp(z = zs[[name]], x = xs[[name]], ref = refs[[name]], dist = dist)
  }
  y <- unsplit(ys, f = list(sub, sex))
  names(y) <- names(z)
  return(round(y, dec))
}

#'Converts measurements to standard deviation scores (SDS)
#'
#'Converts measurements into age- and sex-conditional standard deviation score
#'(SDS) using an external reference.
#'
#'Functions \code{z2y()} and \code{y2z()} are the inverse of each other.
#'
#'The argument \code{dist} determines the statistical distribution. The
#'possibilities are as follows: \describe{
#'\item{"NO"}{\code{ref} should contain columns \code{mean} and \code{sd},
#'containing the mean and the standard deviation in the external reference
#'population.}
#'\item{"LMS"}{\code{ref} should contain columns \code{L}, \code{S} and
#'\code{M} containing the LMS parameters.}
#'\item{"BCCG"}{\code{ref} should contain columns \code{mu}, \code{sigma} and
#'\code{nu} containing the Box-Cox Cole-Green parameters.}
#'\item{"BCPE"}{\code{ref} should contain columns \code{mu}, \code{sigma},
#'\code{nu} and \code{tau} containing the Box-Cox Power Exponential
#'parameters.}
#'\item{"BCT"}{\code{ref} should contain columns \code{mu}, \code{sigma},
#'\code{nu} and \code{tau} containing the Box-Cox T distribution parameters.} }
#'
#'@aliases y2z
#'@param y A numerical vector containing the outcome measurements. The length
#'\code{length(y)} determines the size of the output vector.
#'@param x A vector containing the values of the numerical covariate (typically
#'decimal age or height) at which conversion is desired. Values are replicated
#'to match \code{length(y)}.
#'@param sex A character vector indicating whether the male (\code{"M"}) or
#'female (\code{"F"}) reference should be used. Values are replicated to match
#'\code{length(y)}.
#'@param sub A character vector indicating the level of the \code{sub} field of
#'the reference standard defined in \code{ref}.
#'@param ref A data frame containing a factor \code{sex}, a numerical variable
#'\code{x} containing the tabulated decimal point ages, and two or more
#'numerical variables with reference values. See details.
#'@param dist A string identifying the type of distribution. Valid values are:
#'\code{"NO"}, \code{"BCCG"}, \code{"LMS"}, \code{"BCPE"} and \code{"BCT"}.
#'The default is \code{"LMS"}.
#'@param dec A scalar value indicating the number of decimals used to round the
#'value.
#'@param sex.fallback The level of the \code{sex} field used when no match is
#'found. The default \code{sex.fallback=NA} specifies that
#'unmatched entries should receive a \code{NA} value.
#'@param sub.fallback The level of the \code{sub} field used when no match is
#'found. The default \code{sub.fallback=NA} specifies that
#'unmatched entries should receive a \code{NA} value.
#'@param tail.adjust Logical. If \code{TRUE} then the WHO method for
#'tail adjustment is applied. The default is \code{FALSE}.
#'@return For \code{y2z()}: A vector with \code{length(y)} elements containing
#'the standard deviation score. For \code{z2y()}: A vector with
#'\code{length(z)} elements containing quantiles.
#'@author Stef van Buuren, 2010
#'@seealso \code{\link{z2y}}
#'@keywords distribution
#'@examples
#'
#'
#'boys <- boys7482
#'
#'# SDS of height 115 cm at age 5 years,
#'# relative to Dutch boys reference
#'y2z(y=115, x=5)
#'
#'# same relative to Dutch girls
#'y2z(y=115, x=5, sex="F")
#'
#'# SDS of IOTF BMI cut-off value for overweight (boys 2-18)
#'# relative to Dutch boys reference
#'cutoff <- c(
#'18.41, 18.15, 17.89, 17.72, 17.55, 17.49, 17.42, 17.49, 17.55, 17.74,
#'17.92, 18.18, 18.44, 18.77, 19.10, 19.47, 19.84, 20.20, 20.55, 20.89,
#'21.22, 21.57, 21.91, 22.27, 22.62, 22.96, 23.29, 23.60, 23.90, 24.18,
#'24.46, 24.73, 25.00)
#'age <- seq(2, 18, by=0.5)
#'(z <- y2z(y=cutoff, x=age, sex="M", ref=nl4.bmi))
#'
#'# apply inverse transformation to check calculations
#'round(z2y(z, age, ref=nl4.bmi), 2)
#'cutoff
#'
#'# calculate percentiles of weight 12 kg at 2 years (boys, girls)
#'100*round(pnorm(y2z(y=c(12,12), x=2, sex=c("M","F"), ref=nl4.wgt)),2)
#'
#'# percentage of children lighter than 15kg at ages 2-5
#'e <- expand.grid(age=2:5, sex=c("M","F"))
#'z <- y2z(y=rep(15,nrow(e)), x=e$age, sex=e$sex, ref=nl4.wgt)
#'w <- matrix(100*round(pnorm(z),2), nrow=2, byrow=TRUE)
#'dimnames(w) <- list(c("boys","girls"),2:5)
#'w
#'
#'# analysis in Z scale
#'hgt.z <- y2z(y=boys$hgt, x=boys$age, sex="M", ref=nl4.hgt)
#'wgt.z <- y2z(y=boys$wgt, x=boys$age, sex="M", ref=nl4.wgt)
#'plot(hgt.z, wgt.z, col="blue")
#'
#'
#'# z2y
#'
#'# quantile at SD=0 of age 2 years,
#'# height Dutch boys
#'z2y(z=0, x=2)
#'
#'# same for Dutch girls
#'z2y(z=0, x=2, sex="F")
#'
#'# quantile at SD=c(-1,0,1) of age 2 years, BMI Dutch boys
#'z2y(z=c(-1,0,+1), x=2, ref=nl4.bmi)
#'
#'# 0SD line (P50) in kg of weight for age in 5-10 year, Dutch boys
#'z2y(z=rep(0,6), x=5:10, ref=nl4.wgt)
#'
#'# 95th percentile (P95), age 10 years, wfa, Dutch boys
#'z2y(z=qnorm(0.95), x=10, ref=nl4.wgt)
#'
#'# table of P3, P10, P50, P90, P97 of weight for 5-10 year old Dutch boys
#'# age per year
#'age <- 5:10
#'p <- c(0.03,0.1,0.5,0.9,0.97)
#'z <- rep(qnorm(p), length(age))
#'x <- rep(age, each=length(p))
#'w <- matrix(z2y(z, x=x, sex="M", ref=nl4.wgt), ncol=length(p),
#'            byrow=TRUE)
#'dimnames(w) <- list(age, p)
#'round(w,1)
#'
#'# standard set of Z-scores of weight for all tabulated ages, boys & girls
#'# and three ethnicities
#'sds <- c(-2.5, -2, -1, 0, 1, 2, 2.5)
#'age <- nl4.wgt$x
#'z <- rep(sds, times=length(age))
#'x <- rep(age, each=length(sds))
#'sex <- rep(c("M","F"), each=length(z)/2)
#'w <- z2y(z=z, x=x, sex=sex, ref=nl4.wgt)
#'w <- matrix(w, ncol=length(sds), byrow=TRUE)
#'dimnames(w) <- list(age, sds)
#'data.frame(sub=nl4.wgt$sub, sex=nl4.wgt$sex, round(w,2), row.names=NULL)
#'
#'# P85 of BMI in 5-8 year old Dutch boys and girls
#'e <- expand.grid(age=5:8, sex=c("M","F"))
#'w <- z2y(z=rep(qnorm(0.85),nrow(e)), x=e$age, sex=e$sex, ref=nl4.bmi)
#'w <- matrix(w, nrow=2, byrow=TRUE)
#'dimnames(w) <- list(c("boys","girls"),5:8)
#'w
#'
#'# data transformation of height z-scores to cm-scale
#'z <- c(-1.83, 0.09, 2.33, 0.81, -1.20)
#'x <- c(8.33, 0.23, 19.2, 24.3, 10)
#'sex <- c("M", "M", "F", "M", "F")
#'round(z2y(z=z, x=x, sex=sex, ref=nl4.hgt), 1)
#'
#'# interpolate published height standard
#'# to daily values, days 0-31, boys
#'# on centiles -2SD, 0SD and +2SD
#'days <- 0:31
#'sds <- c(-2, 0, +2)
#'z <- rep(sds, length(days))
#'x <- rep(round(days/365.25,4), each=length(sds))
#'w <- z2y(z, x, sex="M", ref=nl4.hgt)
#'w <- matrix(w, ncol=length(sds), byrow=TRUE)
#'dimnames(w) <- list(days, sds)
#'w
#'
#'@export
y2z <- function(y = c(75, 80, 85), x = 1, sex = "M", sub = "N",
                ref = get("nl4.hgt"), dist = "LMS", dec = 3,
                sex.fallback = NA, sub.fallback = NA, tail.adjust = FALSE) {

  y2z.grp <- function(y, x, ref, dist = "LMS", dec = 3, tail.adjust = FALSE){
    if (dist == "NO") {
      check.names(df = ref, needed = c("x", "mean", "sd"))
      mean <- approx(x = ref[, "x"], y = ref[, "mean"], xout = x)$y
      sd <- approx(x = ref[, "x"], y = ref[, "sd"], xout = x)$y
      return((y - mean) / sd)
    }
    if (dist == "LMS") {
      check.names(df = ref, needed = c("x", "L", "M", "S"))
      L <- approx(x = ref[, "x"], y = ref[, "L"], xout = x)$y
      M <- approx(x = ref[, "x"], y = ref[, "M"], xout = x)$y
      S <- approx(x = ref[, "x"], y = ref[, "S"], xout = x)$y
      z <- ifelse(L > 0.01 | L < (-0.01), (((y/M)^L) - 1) / (L * S), log(y/M) / S)
      if (tail.adjust) z <- adjust.tail(y, z, L, M, S)
      return(z)
    }
    if (dist == "BCCG") {
      check.names(df = ref, needed = c("x", "nu", "mu", "sigma"))
      nu <- approx(x = ref[, "x"], y = ref[, "nu"], xout = x)$y
      mu <- approx(x = ref[, "x"], y = ref[, "mu"], xout = x)$y
      sigma <- approx(x = ref[, "x"], y = ref[, "sigma"], xout = x)$y
      return(qnorm(pBCCG(y, mu = mu, sigma = sigma, nu = nu)))
    }
    if (dist == "BCPE") {
      check.names(df = ref, needed = c("x", "nu", "mu", "sigma", "tau"))
      mu <- approx(x = ref[, "x"], y = ref[, "mu"], xout = x)$y
      sigma <- approx(x = ref[, "x"], y = ref[, "sigma"], xout = x)$y
      nu <- approx(x = ref[, "x"], y = ref[, "nu"], xout = x)$y
      tau <- approx(x = ref[, "x"], y = ref[, "tau"], xout = x)$y
      return(qnorm(pBCPE(y, mu = mu, sigma = sigma, nu = nu, tau = tau)))
    }
    if (dist == "BCT") {
      check.names(df = ref, needed = c("x", "nu", "mu", "sigma", "tau"))
      mu <- approx(x = ref[, "x"], y = ref[, "mu"], xout = x)$y
      sigma <- approx(x = ref[, "x"], y = ref[, "sigma"], xout = x)$y
      nu <- approx(x = ref[, "x"], y = ref[, "nu"], xout = x)$y
      tau <- approx(x = ref[, "x"], y = ref[, "tau"], xout = x)$y
      return(qnorm(pBCT(y, mu = mu, sigma = sigma, nu = nu, tau = tau)))
    }
    # (An unreachable, duplicated "BCCG" branch with an empty return() was
    # removed here; the BCCG case is already handled above.)
    stop(paste("Reference type", dist, "not implemented."))
  }

  if (!is.data.frame(ref)) stop("'ref' should be a data frame.")
  n <- length(y)
  if (n < 1) stop("'y' must have 1 or more values")
  if (!is.vector(y)) return(as.numeric(rep(NA, n)))
  if (!is.numeric(y)) return(as.numeric(rep(NA, n)))
  x <- rep(x, length.out = length(y))
  sex <- rep(sex, length.out = length(y))
  sub <- rep(sub, length.out = length(y))
  dist <- match.arg(dist, choices = c("NO", "LMS", "BCCG", "BCPE", "BCT"))

  # available levels in ref: sex, sub
  lev.sex <- levels(ref$sex[, drop = TRUE])
  lev.sub <- levels(ref$sub[, drop = TRUE])

  # replace nonmatching levels
  idx <- is.na(match(sub, lev.sub))
  if (any(idx)) {
    sub[idx] <- sub.fallback
    if (!is.na(sub.fallback))
      warning("Entries (n=", sum(idx), ") replaced by '", sub.fallback, "'", sep = "")
  }
  idx <- is.na(match(sex, lev.sex))
  if (any(idx)) {
    sex[idx] <- sex.fallback
    if (!is.na(sex.fallback))
      warning("Entries (n=", sum(idx), ") replaced by '", sex.fallback, "'", sep = "")
  }

  # drop empty strata so that the names line up with the splits of the data
  # (the original passed drop = TRUE to with() instead of split(), where it
  # was silently ignored)
  refs <- with(ref, split(ref, f = list(sub, sex), drop = TRUE))
  xs <- split(x, list(sub, sex), drop = TRUE)
  ys <- split(y, list(sub, sex), drop = TRUE)
  zs <- vector("list", length(ys))
  names(zs) <- names(ys)
  for (i in 1:length(ys)) {
    name <- names(ys)[i]
    if (is.null(refs[[name]]))
      # assign into the output list zs (the original assigned into ys,
      # leaving zs[[name]] NULL)
      zs[[name]] <- rep(NA, length = length(ys[[name]]))
    else
      zs[[name]] <- y2z.grp(y = ys[[name]], x = xs[[name]], ref = refs[[name]],
                            dist = dist, tail.adjust = tail.adjust)
  }
  z <- unsplit(zs, f = list(sub, sex))
  names(z) <- names(y)
  return(round(z, dec))
}

check.names <- function(df, needed){
  if (missing(df)) stop("required argument 'df' not found")
  if (missing(needed)) stop("required argument 'needed' not found")
  notfound <- is.na(match(needed, names(df)))
  if (any(notfound))
    stop("Not found: ", paste(needed[notfound], collapse = ", "))
}

adjust.tail <- function(y, z, L, M, S){
  # WHO method: beyond +/-3 SD, make z linear in y using the distance
  # between the 2 SD and 3 SD bounds of the reference
  idx <- !is.na(z) & z > 3
  if (any(idx)) {
    sd3 <- ifelse(L > 0.01 | L < (-0.01), M * (1 + L * S * 3)^(1/L), M * exp(S * 3))
    sd2 <- ifelse(L > 0.01 | L < (-0.01), M * (1 + L * S * 2)^(1/L), M * exp(S * 2))
    z[idx] <- (3 + (y - sd3) / (sd3 - sd2))[idx]
  }
  idx <- !is.na(z) & z < (-3)
  if (any(idx)) {
    sd3 <- ifelse(L > 0.01 | L < (-0.01), M * (1 + L * S * (-3))^(1/L), M * exp(S * (-3)))
    sd2 <- ifelse(L > 0.01 | L < (-0.01), M * (1 + L * S * (-2))^(1/L), M * exp(S * (-2)))
    z[idx] <- (-3 + (y - sd3) / (sd2 - sd3))[idx]
  }
  return(z)
}
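## Sketch of the effect of 'tail.adjust' (illustrative; nl4.wgt ships with the
## AGD package, and the weight of 60 kg at age 10 is deliberately chosen to
## land in the far upper tail of this reference). Beyond +/-3 SD the adjusted
## z-score grows linearly rather than through the LMS transform.
library(AGD)
y2z(y = 60, x = 10, sex = "M", ref = nl4.wgt)                      # raw LMS z-score
y2z(y = 60, x = 10, sex = "M", ref = nl4.wgt, tail.adjust = TRUE)  # tail-adjusted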
## File: AGD/R/agd.r
# extractLMS, ageGrid

#'Extracts LMS values from a gamlss object.
#'
#'Extract LMS values from a gamlss object for solutions that transform the age
#'axis according to the M-curve.
#'
#'It is crucial that \code{t.age} in \code{data} correspond to exactly the same
#'age transformation as used to fit the \code{gamlss} object. Age grid values
#'beyond the range of \code{data$age} produce \code{NA} in the L, M and S
#'values. Parameter \code{flatAge} should be one of the values of the age grid.
#'
#'@param fit A gamlss object containing the final fit on transformed age,
#'\code{t.age}.
#'@param data A data frame containing the original data, with both \code{age}
#'and \code{t.age}.
#'@param sex A character vector indicating whether the fit applies to males
#'(\code{sex="M"}) or females (\code{sex="F"}). The default is \code{sex="M"}.
#'@param grid A character vector indicating the desired age grid. See
#'\code{ageGrid()} for possible options. The default is \code{grid="classic"},
#'a grid of 69 age points.
#'@param decimals A numerical vector of length 3 indicating the number of
#'decimal places for rounding of the L, M and S curves, respectively.
#'@param flatAge A scalar indicating the age beyond which the L, M and S values
#'should be constant. The default (NULL) is not to flatten the curves.
#'@return A data frame with rows corresponding to time points, and with the
#'following columns: \code{sex}, \code{x}, \code{L}, \code{M}, \code{S}.
#'@author Stef van Buuren, 2010
#'@keywords distribution
#'@examples
#'
#'\dontrun{
#'library(gamlss)
#'boys <- boys7482
#'
#'# calculate initial M curve
#'data <- na.omit(boys[,1:2])
#'f0154 <- gamlss(hgt~cs(age,df=15,c.spar=c(-1.5,2.5)),
#'                sigma.formula=~cs(age,df=4,c.spar=c(-1.5,2.5)),
#'                data=data,family=NO,
#'                control=gamlss.control(n.cyc=3))
#'
#'# calculate transformed age
#'t.age <- fitted(lm(data$age~fitted(f0154)))
#'t.age <- t.age - min(t.age)
#'data.t <- data.frame(data,t.age=t.age)
#'
#'# calculate final solution
#'f0106r <- gamlss(hgt~cs(t.age,df=10,c.spar=c(-1.5,2.5)),
#'                 sigma.formula=~cs(t.age,df=6,c.spar=c(-1.5,2.5)),
#'                 data=data.t,family=NO,
#'                 control=gamlss.control(n.cyc=3))
#'
#'# extract the LMS reference table in the 'compact' age grid
#'nl4.hgt.boys <- extractLMS(fit = f0106r, data=data.t, grid="compact",
#'                           dec = c(0,2,5))
#'nl4.hgt.boys
#'
#'# flatten the reference beyond age 20Y (not very useful in this data)
#'nl4.hgt.boys.flat <- extractLMS(fit = f0106r, data=data.t, flatAge=20)
#'nl4.hgt.boys.flat
#'
#'# use log age transformation
#'data.t <- data.frame(data, t.age = log(data$age))
#'f0106rlog <- gamlss(hgt~cs(t.age,df=10,c.spar=c(-1.5,2.5)),
#'                    sigma.formula=~cs(t.age,df=6,c.spar=c(-1.5,2.5)),
#'                    data=data.t,family=NO,
#'                    control=gamlss.control(n.cyc=1))
#'
#'nl4.hgt.boys.log <- extractLMS(fit = f0106rlog, data=data.t)
#'nl4.hgt.boys.log
#'}
#'@export
extractLMS <- function(fit, data, sex="M", grid="classic",
                       decimals = c(4,4,4), flatAge = NULL) {
  # Extracts the LMS table after the 'Cole-transformation'
  # or any other transformation in t.age
  #   fit   final gamlss object
  #   data  should contain both age and t.age
  check.names(df=data, needed=c("age","t.age"))
  if (!is.gamlss(fit)) stop("fit not a gamlss object.")

  tm <- data$t.age[which.min(data$age)]
  # if (abs(tm)>0.0001) stop("wrong offset of transformed age")
  # if (min(data$t.age) < 0) warning("Negative transformed age found. Results are unpredictable.")

  grd <- ageGrid(grid)
  grid.age <- grd$year
  minage <- min(data$age, na.rm=TRUE)
  maxage <- max(data$age, na.rm=TRUE)
  outside <- grid.age < minage | grid.age > maxage
  grid.age <- grid.age[!outside]

  t.grid.age <- approx(x=data$age, y=data$t.age, xout=grid.age, ties=mean)$y
  if (length(t.grid.age)==0) stop("No overlap between age grid and data.")
  newdata <- data.frame(t.age=t.grid.age)

  # lms <- predictAll(fit, newdata=newdata, data=data)
  # print(lms)
  lms <- predictAll(fit, newdata=newdata, data=data)
  lms$mu <- round(lms$mu, decimals[2])
  lms$sigma <- round(lms$sigma/lms$mu, decimals[3])  ## THIS IS PROBABLY AN ERROR!! 25/07/2013
  if (length(lms)>2) lms$nu <- round(lms$nu, decimals[1]) else lms$nu <- 1
  lms <- as.data.frame(lms)

  result <- data.frame(sex=sex, x=grd$year, L=NA, M=NA, S=NA)
  result[!outside, "L"] <- lms$nu
  result[!outside, "M"] <- lms$mu
  result[!outside, "S"] <- lms$sigma

  if (!is.null(flatAge)) {
    if (!(flatAge %in% result$x))
      stop(paste0("flatAge value (", flatAge, ") not found in age grid"))
    flatLMS <- result[flatAge==result$x, c("L","M","S")]
    result[!outside & flatAge<result$x, c("L","M","S")] <- flatLMS
  }
  return(result)
}

#'Creates an age grid according to a specified format.
#'
#'Creates an age grid according to a specified format.
#'
#'
#'@param grid A character string specifying one of the following:
#'\code{"compact"}, \code{"classic"}, \code{"extensive"}, \code{"0-104w"},
#'\code{"0-24m"}, \code{"0-21y"}, \code{"0-21yd"} or \code{"0-21yc"}. The
#'default is \code{"compact"}, which produces an age grid between 0 and 21
#'years with 95 points.
#'@return A list with five components: \code{format}, \code{year},
#'\code{month}, \code{week} and \code{day} containing the age grid in different
#'units.
#'@author Stef van Buuren, 2010
#'@keywords distribution
#'@examples
#'
#'
#'age <- ageGrid("classic")$year
#'
#'@export
ageGrid <- function(grid="compact"){
  formats <- c("compact", "classic", "extensive",
               "0-104w", "0-24m", "0-21y", "0-21yd", "0-21yc")
  fmi <- pmatch(grid, formats)
  if (is.na(fmi)) stop("Grid format ", grid, " unknown.")
  grid <- switch(fmi,
    compact = c((0:14)/365.25, (3:13)*7/365.25, seq(3,11.5,0.5)/12,
                (12:23)/12, seq(2, 21, 0.5)),
    classic = {
      grid.weeks <- c(0,1,2,3,4,5,6,7,8,9,10,12,14,16,18,20,22,24,26,
                      28,32,36,40,44,48,52,56,60,64)
      c(grid.weeks*7/365.25, seq(1.5,21,0.5))
    },
    extensive = (0:(365.25*21+1))/365.25,
    week  = (0:104)*7/365.25,
    month = (0:24)/12,
    year  = 0:21,
    dyear = seq(0, 21, 0.1),
    cyear = seq(0, 21, 0.01)
  )
  year  <- round(grid, 4)
  month <- round(grid*12, 4)
  week  <- round(grid*365.25/7, 4)
  day   <- round(grid*365.25, 4)
  return(list(format = formats[fmi], year = year, month = month,
              week = week, day = day))
}
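## Demo (illustrative, not part of the package): ageGrid() returns the same
## grid in several units, and pmatch() lets format names be abbreviated.
if (FALSE) {
  g <- ageGrid("0-24m")                         # monthly grid, birth to 2 years
  head(cbind(years = g$year, months = g$month, days = g$day))
  ageGrid("ext")$format                         # partial match -> "extensive"
}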
## ---- end of file: AGD/R/extractLMS.r ----
#'Superposes two worm plots
#'
#'Superposes two worm plots from GAMLSS fitted objects. This is a diagnostic
#'tool for comparing two solutions.
#'
#'This function is a customized version of the \code{wp()} function found in
#'the \code{gamlss} package. Function \code{wp.twin()} allows overplotting of
#'two worm plots, each in its own color. The points of \code{obj1} are plotted
#'first, the points of \code{obj2} are superposed. This twin worm plot provides
#'a visual assessment of the differences between the solutions. Extra
#'arguments (e.g. \code{xvar}) can be specified and are passed down to the
#'\code{wp()} function of \code{gamlss}. The worm plot is a
#'detrended normal QQ-plot that highlights departures from normality.
#'
#'Argument \code{xvar} takes priority over \code{xvar.column}. The \code{xvar}
#'variable is cut into \code{n.inter} intervals with an equal number of
#'observations, and detrended normal QQ (i.e. worm) plots for each interval are
#'plotted. This is a way of highlighting failures of the model within
#'different ranges of the explanatory variable.
#'
#'If \code{line=TRUE} and \code{n.inter>1}, the fitted coefficients from
#'fitting cubic polynomials to the residuals (within each x-variable interval)
#'can be obtained by e.g. \code{coeffs <- wp.twin(model1, xvar=x, n.inter=9)}. van
#'Buuren \emph{et al.} (2001) used these residuals to identify regions
#'(intervals) of the explanatory variable within which the model does not
#'adequately fit the data (called "model violation").
#'
#'@param obj1 a GAMLSS fitted object
#'@param obj2 an optional second GAMLSS fitted object
#'@param xvar the explanatory variable against which the worm plots will be
#'plotted
#'@param xvar.column the number referring to the column of \code{obj1$mu.x} and
#'\code{obj2$mu.x}. If \code{xvar=NULL} then the explanatory variable is set
#'to \code{xvar=obj1$mu.x[,xvar.column]} respectively
#'\code{xvar=obj2$mu.x[,xvar.column]}. The default is \code{xvar.column=2},
#'which selects the variable following the intercept (which is typically age in
#'most applications).
#'@param n.inter the number of intervals in which the explanatory variable
#'\code{xvar} will be cut. The default is 16.
#'@param show.given whether to show the x-variable intervals in the top of the
#'graph. The default is \code{show.given=FALSE}.
#'@param ylim.worm for multiple plots, this value is the y-axis limit. The
#'default value is \code{ylim.worm=0.5}.
#'@param line whether to plot the polynomial line in the worm plot. The default
#'value is \code{line=FALSE}.
#'@param cex the cex plotting parameter, with default \code{cex=1}
#'@param col1 the color for the points of \code{obj1}. The default is
#'\code{col1="black"}.
#'@param col2 the color for the points of \code{obj2}. The default is
#'\code{col2="orange"}.
#'@param warnings a logical indicating whether warnings should be produced. The
#'default is \code{warnings=FALSE}.
#'@param \dots for extra arguments, \code{overlap}, \code{xlim.worm} or
#'\code{pch}
#'@return For multiple plots, the \code{xvar} intervals and the coefficients of
#'the cubic polynomials fitted to the residuals (within each \code{xvar}
#'interval) are returned.
#'@author Stef van Buuren, using R code of Mikis Stasinopoulos and Bob Rigby
#'@seealso \code{\link{wp}}
#'@references Stasinopoulos D. M. Rigby R.A. (2007) Generalized additive models
#'for location scale and shape (GAMLSS) in R. \emph{Journal of Statistical
#'Software}, Vol. \bold{23}, Issue 7, Dec 2007,
#'\url{http://www.jstatsoft.org/v23/i07}.
#' #'van Buuren and Fredriks M. (2001) Worm plot: simple diagnostic device for #'modelling growth reference curves. \emph{Statistics in Medicine}, \bold{20}, #'1259--1277. #' #'van Buuren and Fredriks M. (2007) Worm plot to diagnose fit in quantile #'regression. \emph{Statistical Modelling}, \bold{7}, 4, 363--376. #' #'@keywords smooth #'@examples #' #'library(gamlss) #'data(abdom) #'a <- gamlss(y~cs(x,df=1),sigma.fo=~cs(x,0),family=LO,data=abdom) #'b <- gamlss(y~cs(x,df=3),sigma.fo=~cs(x,1),family=LO,data=abdom) #'coeff1 <- wp.twin(a,b,line=TRUE) #'coeff1 #'rm(a,b,coeff1) #'@export wp.twin <- function(obj1, obj2=NULL, xvar=NULL, xvar.column=2, n.inter=16, show.given=FALSE, ylim.worm=0.5, line=FALSE, cex=1, col1="black", col2="orange", warnings=FALSE, ...) { coplot2 <- function (formula, data, given.values, panel = points, rows, columns, show.given = TRUE, col = par("fg"), pch = par("pch"), bar.bg = c(num = gray(0.8), fac = gray(0.95)), xlab = c(x.name, paste("Given :", a.name)), ylab = c(y.name, paste("Given :", b.name)), subscripts = FALSE, axlabels = function(f) abbreviate(levels(f)), number = 6, overlap = 0.5, xlim, ylim, overplot = FALSE, ...) { deparen <- function(expr) { while (is.language(expr) && !is.name(expr) && deparse(expr[[1L]])[1L] == "(") expr <- expr[[2L]] expr } bad.formula <- function() stop("invalid conditioning formula") bad.lengths <- function() stop("incompatible variable lengths") getOp <- function(call) deparse(call[[1L]], backtick = FALSE)[[1L]] formula <- deparen(formula) if (!inherits(formula, "formula")) bad.formula() y <- deparen(formula[[2L]]) rhs <- deparen(formula[[3L]]) if (getOp(rhs) != "|") bad.formula() x <- deparen(rhs[[2L]]) rhs <- deparen(rhs[[3L]]) if (is.language(rhs) && !is.name(rhs) && getOp(rhs) %in% c("*", "+")) { have.b <- TRUE a <- deparen(rhs[[2L]]) b <- deparen(rhs[[3L]]) } else { have.b <- FALSE a <- rhs } if (missing(data)) data <- parent.frame() x.name <- deparse(x) x <- eval(x, data, parent.frame()) nobs <- length(x) y.name <- deparse(y) y <- eval(y, data, parent.frame()) if (length(y) != nobs) bad.lengths() a.name <- deparse(a) a <- eval(a, data, parent.frame()) if (length(a) != nobs) bad.lengths() if (is.character(a)) a <- as.factor(a) a.is.fac <- is.factor(a) if (have.b) { b.name <- deparse(b) b <- eval(b, data, parent.frame()) if (length(b) != nobs) bad.lengths() if (is.character(b)) b <- as.factor(b) b.is.fac <- is.factor(b) missingrows <- which(is.na(x) | is.na(y) | is.na(a) | is.na(b)) } else { missingrows <- which(is.na(x) | is.na(y) | is.na(a)) b <- NULL b.name <- "" } number <- as.integer(number) if (length(number) == 0L || any(number < 1)) stop("'number' must be integer >= 1") if (any(overlap >= 1)) stop("'overlap' must be < 1 (and typically >= 0).") bad.givens <- function() stop("invalid 'given.values'") if (missing(given.values)) { a.intervals <- if (a.is.fac) { i <- seq_along(a.levels <- levels(a)) a <- as.numeric(a) cbind(i - 0.5, i + 0.5) } else co.intervals(unclass(a), number = number[1L], overlap = overlap[1L]) b.intervals <- if (have.b) { if (b.is.fac) { i <- seq_along(b.levels <- levels(b)) b <- as.numeric(b) cbind(i - 0.5, i + 0.5) } else { if (length(number) == 1L) number <- rep.int(number, 2) if (length(overlap) == 1L) overlap <- rep.int(overlap, 2) co.intervals(unclass(b), number = number[2L], overlap = overlap[2L]) } } } else { if (!is.list(given.values)) given.values <- list(given.values) if (length(given.values) != (if (have.b) 2L else 1L)) bad.givens() a.intervals <- given.values[[1L]] if (a.is.fac) { 
a.levels <- levels(a) if (is.character(a.intervals)) a.intervals <- match(a.intervals, a.levels) a.intervals <- cbind(a.intervals - 0.5, a.intervals + 0.5) a <- as.numeric(a) } else if (is.numeric(a)) { if (!is.numeric(a.intervals)) bad.givens() if (!is.matrix(a.intervals) || ncol(a.intervals) != 2) a.intervals <- cbind(a.intervals - 0.5, a.intervals + 0.5) } if (have.b) { b.intervals <- given.values[[2L]] if (b.is.fac) { b.levels <- levels(b) if (is.character(b.intervals)) b.intervals <- match(b.intervals, b.levels) b.intervals <- cbind(b.intervals - 0.5, b.intervals + 0.5) b <- as.numeric(b) } else if (is.numeric(b)) { if (!is.numeric(b.intervals)) bad.givens() if (!is.matrix(b.intervals) || ncol(b.intervals) != 2) b.intervals <- cbind(b.intervals - 0.5, b.intervals + 0.5) } } } if (any(is.na(a.intervals)) || (have.b && any(is.na(b.intervals)))) bad.givens() if (have.b) { rows <- nrow(b.intervals) columns <- nrow(a.intervals) nplots <- rows * columns if (length(show.given) < 2L) show.given <- rep.int(show.given, 2L) } else { nplots <- nrow(a.intervals) if (missing(rows)) { if (missing(columns)) { rows <- ceiling(round(sqrt(nplots))) columns <- ceiling(nplots/rows) } else rows <- ceiling(nplots/columns) } else if (missing(columns)) columns <- ceiling(nplots/rows) if (rows * columns < nplots) stop("rows * columns too small") } total.columns <- columns total.rows <- rows f.col <- f.row <- 1 if (show.given[1L]) { total.rows <- rows + 1 f.row <- rows/total.rows } if (have.b && show.given[2L]) { total.columns <- columns + 1 f.col <- columns/total.columns } mar <- if (have.b) rep.int(0, 4) else c(0.5, 0, 0.5, 0) oma <- c(5, 6, 5, 4) if (have.b) { oma[2L] <- 5 if (!b.is.fac) oma[4L] <- 5 } if (a.is.fac && show.given[1L]) oma[3L] <- oma[3L] - 1 opar <- par(mfrow = c(total.rows, total.columns), oma = oma, mar = mar, xaxs = "r", yaxs = "r") on.exit(par(opar)) if (!overplot) plot.new() if (missing(xlim)) xlim <- range(as.numeric(x), finite = TRUE) if (missing(ylim)) ylim <- range(as.numeric(y), finite = TRUE) pch <- rep(pch, length.out = nobs) col <- rep(col, length.out = nobs) do.panel <- function(index, subscripts = FALSE, id) { Paxis <- function(side, x) { if (nlevels(x)) { lab <- axlabels(x) axis(side, labels = lab, at = seq(lab), xpd = NA) } else Axis(x, side = side, xpd = NA) } istart <- (total.rows - rows) + 1 i <- total.rows - ((index - 1)%/%columns) j <- (index - 1)%%columns + 1 par(mfg = c(i, j, total.rows, total.columns)) if (!overplot) plot.new() plot.window(xlim, ylim) if (any(is.na(id))) id[is.na(id)] <- FALSE if (any(id)) { grid(lty = "solid") if (subscripts) panel(x[id], y[id], subscripts = id, col = col[id], pch = pch[id], ...) else panel(x[id], y[id], col = col[id], pch = pch[id], ...) 
} if ((i == total.rows) && (j%%2 == 0)) Paxis(1, x) else if ((i == istart || index + columns > nplots) && (j%%2 == 1)) Paxis(3, x) if ((j == 1) && ((total.rows - i)%%2 == 0)) Paxis(2, y) else if ((j == columns || index == nplots) && ((total.rows - i)%%2 == 1)) Paxis(4, y) box() } if (have.b) { count <- 1 for (i in 1L:rows) { for (j in 1L:columns) { id <- ((a.intervals[j, 1] <= a) & (a <= a.intervals[j, 2]) & (b.intervals[i, 1] <= b) & (b <= b.intervals[i, 2])) do.panel(count, subscripts, id) count <- count + 1 } } } else { for (i in 1L:nplots) { id <- ((a.intervals[i, 1] <= a) & (a <= a.intervals[i, 2])) do.panel(i, subscripts, id) } } mtext(xlab[1L], side = 1, at = 0.5 * f.col, outer = TRUE, line = 3.5, xpd = NA, font = par("font.lab"), cex = par("cex.lab")) mtext(ylab[1L], side = 2, at = 0.5 * f.row, outer = TRUE, line = 3.5, xpd = NA, font = par("font.lab"), cex = par("cex.lab")) if (length(xlab) == 1L) xlab <- c(xlab, paste("Given :", a.name)) if (show.given[1L]) { par(fig = c(0, f.col, f.row, 1), mar = mar + c(3 + (!a.is.fac), 0, 0, 0), new = TRUE) if (!overplot) plot.new() nint <- nrow(a.intervals) a.range <- range(a.intervals, finite = TRUE) plot.window(a.range + c(0.03, -0.03) * diff(a.range), 0.5 + c(0, nint)) rect(a.intervals[, 1], 1L:nint - 0.3, a.intervals[, 2], 1L:nint + 0.3, col = bar.bg[if (a.is.fac) "fac" else "num"]) if (a.is.fac) { text(apply(a.intervals, 1L, mean), 1L:nint, a.levels) } else { Axis(a, side = 3, xpd = NA) axis(1, labels = FALSE) } box() mtext(xlab[2L], 3, line = 3 - a.is.fac, at = mean(par("usr")[1L:2]), xpd = NA, font = par("font.lab"), cex = par("cex.lab")) } else { mtext(xlab[2L], 3, line = 3.25, outer = TRUE, at = 0.5 * f.col, xpd = NA, font = par("font.lab"), cex = par("cex.lab")) } if (have.b) { if (length(ylab) == 1L) ylab <- c(ylab, paste("Given :", b.name)) if (show.given[2L]) { par(fig = c(f.col, 1, 0, f.row), mar = mar + c(0, 3 + (!b.is.fac), 0, 0), new = TRUE) if (!overplot) plot.new() nint <- nrow(b.intervals) b.range <- range(b.intervals, finite = TRUE) plot.window(0.5 + c(0, nint), b.range + c(0.03, -0.03) * diff(b.range)) rect(1L:nint - 0.3, b.intervals[, 1], 1L:nint + 0.3, b.intervals[, 2], col = bar.bg[if (b.is.fac) "fac" else "num"]) if (b.is.fac) { text(1L:nint, apply(b.intervals, 1L, mean), b.levels, srt = 90) } else { Axis(b, side = 4, xpd = NA) axis(2, labels = FALSE) } box() mtext(ylab[2L], 4, line = 3 - b.is.fac, at = mean(par("usr")[3:4]), xpd = NA, font = par("font.lab"), cex = par("cex.lab")) } else { mtext(ylab[2L], 4, line = 3.25, at = 0.5 * f.row, outer = TRUE, xpd = NA, font = par("font.lab"), cex = par("cex.lab")) } } if (length(missingrows)) { cat("\n", gettext("Missing rows"), ": ", missingrows, "\n", sep = "") invisible(missingrows) } } wp2 <- function (object, xvar = NULL, n.inter = 4, xcut.points = NULL, overlap = 0, xlim.all = 4, xlim.worm = 3.5, show.given = TRUE, line = TRUE, ylim.all = 12 * sqrt(1/length(fitted(object))), ylim.worm = 12 * sqrt(n.inter/length(fitted(object))), cex = 1, pch = 21, overplot = FALSE, mline=3, color=col("col"), ...) { panel.fun <- function(x, y, col = par("col"), pch = par("pch"), cex = par("cex"), col.smooth = "red", span = 2/3, iter = 3, ...) 
{ qq <- as.data.frame(qqnorm(y, plot = FALSE)) qq$y <- qq$y - qq$x grid(nx = NA, ny = NA, lwd = 2) points(qq$x, qq$y, pch = pch, col = col, bg = col, cex = cex) abline(0, 0, lty = 2, col = 1) abline(0, 1e+05, lty = 2, col = 1) yuplim <- 10 * sqrt(1/length(qq$y)) level <- 0.95 lz <- -xlim.worm hz <- xlim.worm dz <- 0.25 z <- seq(lz, hz, dz) p <- pnorm(z) se <- (1/dnorm(z)) * (sqrt(p * (1 - p)/length(qq$y))) low <- qnorm((1 - level)/2) * se high <- -low { no.points <- length(qq$y) total.points <<- total.points + no.points no.mis <- sum(abs(qq$y) > ylim.worm) warning(paste("number of missing points from plot=", no.mis, " out of ", no.points, "\n",sep="")) if (any(abs(qq$y) > ylim.worm)) warning("Some points are missed out ", "\n", "increase the y limits using ylim.worm") } if (any(abs(qq$x) > xlim.worm)) { warning("Some points are missed out ", "\n", "increase the x limits using xlim.worm") } lines(z, low, lty = 2, lwd = 0.01) lines(z, high, lty = 2, lwd = 0.01) if (line == TRUE) { fit <- lm(qq$y ~ qq$x + I(qq$x^2) + I(qq$x^3)) s <- spline(qq$x, fitted(fit)) flags <- s$x > -2.5 & s$x < 2.5 lines(list(x = s$x[flags], y = s$y[flags]), col = col.smooth, lwd = 0.01) coef1 <- coef(fit) assign("coef1", coef1, envir = parent.frame(n = 3)) assign("coefall", c(coefall, coef1), envir = parent.frame(n = 3)) } } check.overlap <- function(interval) { if (!is.matrix(interval)) { stop(paste("The interval specified is not a matrix.")) } if (dim(interval)[2] != 2) { stop(paste("The interval specified is not a valid matrix.\nThe number of columns should be equal to 2.")) } crows = dim(interval)[1] for (i in 1:(crows - 1)) { if (!(abs(interval[i, 2] - interval[i + 1, 1]) < 1e-04)) { interval[i + 1, 1] = interval[i, 2] } } return(interval) } get.intervals <- function(xvar, xcut.points) { if (!is.vector(xcut.points)) { stop(paste("The interval is not a vector.")) } if (any((xcut.points < min(xvar)) | any(xcut.points > max(xvar)))) { stop(paste("The specified `xcut.points' are not within the range of the x: (", min(xvar), " , ", max(xvar), ")")) } extra <- (max(xvar) - min(xvar))/1e+05 int <- c(min(xvar), xcut.points, (max(xvar) + 2 * extra)) ii <- 1:(length(int) - 1) r <- 2:length(int) x1 <- int[ii] xr <- int[r] - extra if (any(x1 > xr)) { stop(paste("The interval is are not in a increasing order.")) } cbind(x1, xr) } if (!is.gamlss(object)) stop(paste("This is not an gamlss object", "\n", "")) if (is.null(xvar)) { qq <- as.data.frame(qqnorm(resid(object), plot = FALSE)) qq$y <- qq$y - qq$x level <- 0.95 lz <- -xlim.all hz <- xlim.all dz <- 0.25 z <- seq(lz, hz, dz) p <- pnorm(z) se <- (1/dnorm(z)) * (sqrt(p * (1 - p)/length(qq$y))) low <- qnorm((1 - level)/2) * se high <- -low if (any(abs(qq$y) > ylim.all)) { warning("Some points are missed out ", "\n", "increase the y limits using ylim.all") } if (any(abs(qq$x) > xlim.all)) { warning("Some points are missed out ", "\n", "increase the x limits using xlim.all") } plot(qq$x, qq$y, ylab = "Deviation", xlab = "", xlim = c(-xlim.all, xlim.all), ylim = c(-ylim.all, ylim.all), cex = cex, pch = pch, bg = "wheat", ) grid(lty = "solid") abline(0, 0, lty = 2, col = 2) abline(0, 1e+05, lty = 2, col = 2) lines(z, low, lty = 2) lines(z, high, lty = 2) if (line == TRUE) { fit <- lm(qq$y ~ qq$x + I(qq$x^2) + I(qq$x^3)) s <- spline(qq$x, fitted(fit)) flags <- s$x > -3 & s$x < 3 lines(list(x = s$x[flags], y = s$y[flags]), col = color, lwd = 0.01) } } else { w <- object$weights if (all(trunc(w) == w)) xvar <- rep(xvar, w) if (is.null(xcut.points)) { given.in <- co.intervals(xvar, 
number = n.inter, overlap = overlap)
      if (overlap == 0)
        given.in <- check.overlap(given.in)
    }
    else {
      given.in <- get.intervals(xvar, xcut.points)
    }
    total.points <- 0
    coefall <- coef1 <- NULL
    y <- resid(object)
    x <- resid(object)
    coplot2(y ~ x | xvar, given.values = given.in, panel = panel.fun,
            ylim = c(-ylim.worm, ylim.worm), xlim = c(-xlim.worm, xlim.worm),
            ylab = "Deviation", xlab = "", show.given = show.given, bg = "wheat",
            pch = pch, cex = cex, bar.bg = c(num = "light blue"),
            overplot = overplot, col = color, ...)
    mtext(paste(object$call[c(-1, -length(object$call))], collapse = " "),
          side = 1, line = mline, col = color)
    if (overlap == 0) {
      if (total.points != length(y))
        warning("the total number of points in the plot is not equal \n to the number of observations in y \n")
    }
  }
  if (!is.null(xvar) & line) {
    mcoef <- matrix(coefall, ncol = 4, byrow = TRUE)
    out <- list(classes = given.in, coef = mcoef)
  }
}

is.valid <- function(xvar.column, obj) {
  return(xvar.column >= 1 & xvar.column <= ncol(obj$mu.x))
}

# ---- ENTRY POINT
call <- match.call()
if (!is.gamlss(obj1))
  stop(paste("Argument obj1 is not a gamlss object", "\n", ""))
if ((!is.gamlss(obj2)) & (!is.null(obj2)))
  stop(paste("Argument obj2 is not a gamlss object", "\n", ""))
expr1 <- expression(
  wp2(obj1, xvar=xvar, n.inter=n.inter, show.given=show.given,
      ylim.worm=ylim.worm, cex=cex, color=col1, col.smooth=col1,
      overplot=FALSE, line=line, mline=3, ...))
expr2 <- expression(
  wp2(obj2, xvar=xvar, n.inter=n.inter, show.given=show.given,
      ylim.worm=ylim.worm, cex=cex, color=col2, col.smooth=col2,
      overplot=TRUE, line=line, mline=4, ...))
if (is.null(call$xvar)) {
  if (n.inter > 1) {
    if (!is.valid(xvar.column, obj1)) {
      n.inter <- 1
      if (warnings) warning("Parameter n.inter changed to 1 because xvar.column is out-of-range.")
    } else {
      xvar <- obj1$mu.x[,xvar.column]
      if (min(xvar)==max(xvar)) stop("xvar is constant")
    }
  }
}
if (!warnings) suppressWarnings(eval(expr1)) else eval(expr1)
if (!is.null(call$obj2)) {
  if (is.null(call$xvar)) {
    if (n.inter > 1) {
      if (!is.valid(xvar.column, obj2)) {
        n.inter <- 1
        if (warnings) warning("Parameter n.inter changed to 1 because xvar.column is out-of-range.")
      } else {
        xvar <- obj2$mu.x[,xvar.column]
        if (min(xvar)==max(xvar)) stop("xvar is constant")
      }
    }
  }
  if (!warnings) suppressWarnings(eval(expr2)) else eval(expr2)
}
}
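## Usage sketch (follows the roxygen example above; needs the gamlss package
## and its 'abdom' data): superpose worm plots of a stiff and a flexible fit,
## split over 9 intervals of the explanatory variable.
if (FALSE) {
  library(gamlss)
  data(abdom)
  a <- gamlss(y ~ cs(x, df = 1), sigma.fo = ~cs(x, 0), family = LO, data = abdom)
  b <- gamlss(y ~ cs(x, df = 3), sigma.fo = ~cs(x, 1), family = LO, data = abdom)
  coefs <- wp.twin(a, b, xvar = abdom$x, n.inter = 9, line = TRUE)
}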
## ---- end of file: AGD/R/wp.twin.r ----
# course.r # library(gamlss) library(AGD) boys <- boys7482 setEPS(horizontal = FALSE, onefile = FALSE, paper = "special") old.par <- par(mar=c(1,1,1,1), col="darkblue") dir <- path.expand("~/Documents/Sync/Groeistat/NIHES/2012/Slides/Session1Figures/") # scatterplot hgt - age postscript(paste(dir,"hgtage1.eps",sep="")) par(mar=c(2,2,2,2), col="darkblue") with(boys, plot(age, hgt)) dev.off() # simple linear model fit <- lm(hgt~age, data=boys) summary(fit) # plot diagnostics postscript(paste(dir,"fit1_%01d.eps",sep="")) par(mar=c(2,2,2,2), col="darkblue") plot(fit) dev.off() # add quadratic fit <- lm(hgt~age+I(age^2), data=boys, na.action=na.exclude) summary(fit) postscript(paste(dir,"fit2_%01d.eps",sep="")) par(mar=c(2,2,2,2), col="darkblue") plot(fit) dev.off() postscript(paste(dir,"hgtage2.eps",sep="")) par(mar=c(2,2,2,2), col="darkblue") with(boys, plot(age, hgt)) sigma <- summary(fit)$sigma lines(boys$age, predict(fit), col="red") lines(boys$age, predict(fit)+2*sigma, col="green") lines(boys$age, predict(fit)-2*sigma, col="green") dev.off() # semi-parametric model inner <- c(7/365.25, 1/3, 1, 2, 4, 10, 14, 18, 21) xb <- bs(boys$age, knots=inner, B=c(0,23), deg=1) xb[1:3,] fit <- lm(hgt ~ xb, data=boys, na.action=na.exclude) postscript(paste(dir,"bspline1.eps",sep="")) par(mar=c(4,4,1,1), col="darkblue") with(boys, plot(age, hgt)) lines(boys$age, predict(fit), col="red", lwd=3) dev.off() # include fitted knots postscript(paste(dir,"bspline2.eps",sep="")) par(mar=c(4,4,1,1), col="darkblue") with(boys, plot(age, hgt)) lines(boys$age, predict(fit), col="red", lwd=3) est <- coef(fit) xy <- xy.coords(x=inner[1:9], y=(est[1]+est)[c(-1,-11)]) points(xy, col="yellow", pch=20, cex=2.5) dev.off() # include +2SD and -2SD lines postscript(paste(dir,"bspline3.eps",sep="")) par(mar=c(4,4,1,1), col="darkblue") with(boys, plot(age, hgt)) lines(boys$age, predict(fit), col="red", lwd=3) est <- coef(fit) xy <- xy.coords(x=inner[1:9], y=(est[1]+est)[c(-1,-11)]) points(xy, col="yellow", pch=20, cex=2.5) sigma <- summary(fit)$sigma lines(boys$age, predict(fit)+2*sigma, col="green", lwd=3) lines(boys$age, predict(fit)-2*sigma, col="green", lwd=3) dev.off() myref <- data.frame(sex = "M", sub = "N", x = round(xy$x,4), mean = round(xy$y,2), sd = round(sigma,2)) # calculate SDS of length 80cm, 1 years old boy # relative to your new reference y2z(y=80, x=1, ref=myref, dist="NO") # relative to Dutch reference (default) y2z(y=80,x=1) # relative to CDC reference y2z(y=80,x=1,ref=cdc.hgt) # calculate PERCENTILE of length 80cm, 1 years old boy # relative to your new reference 100*pnorm(y2z(y=80,x=1,ref=myref,dist="NO")) # relative to Dutch reference 100*pnorm(y2z(y=80,x=1)) # relative to CDC reference 100*pnorm(y2z(y=80,x=1,ref=cdc.hgt)) # calculate PERCENTILE of length 80cm, 1 years old GIRL # relative to your new reference # The warning indicates that there are no female references 100*pnorm(y2z(y=80,x=1,sex="F",ref=myref,dist="NO")) # relative to Dutch reference 100*pnorm(y2z(y=80,x=1,sex="F",ref=nl4.hgt)) # relative to CDC reference 100*pnorm(y2z(y=80,x=1,sex="F",ref=cdc.hgt)) # SDS of IOTF BMI cut-off value for overweight (boys 2-18) # relative to Dutch boys reference cutoff <- c( 18.41, 18.15, 17.89, 17.72, 17.55, 17.49, 17.42, 17.49, 17.55, 17.74, 17.92, 18.18, 18.44, 18.77, 19.10, 19.47, 19.84, 20.20, 20.55, 20.89, 21.22, 21.57, 21.91, 22.27, 22.62, 22.96, 23.29, 23.60, 23.90, 24.18, 24.46, 24.73, 25.00) age <- seq(2, 18, by=0.5) percent <- 100-100*pnorm(y2z(y=cutoff, x=age, sex="M", ref=nl4.bmi)) 
postscript(paste(dir,"iotf1.eps",sep="")) par(mar=c(4,4,1,1), col="darkblue") plot(age, percent, type='l',ylim=c(0,20),lwd=2,col="red") title("Overweight prevalence Dutch boys 1997") dev.off() # percentage of children lighter than 15kg at ages 2-5 e <- expand.grid(age=2:5, sex=c("M","F")) z <- y2z(y=rep(15,nrow(e)), x=e$age, sex=e$sex, ref=nl4.wgt) w <- matrix(100*round(pnorm(z),2), nrow=2, byrow=TRUE) dimnames(w) <- list(c("boys","girls"),2:5) w # analysis in Z scale hgt.z <- y2z(boys$hgt, boys$age, sex="M", ref=nl4.hgt) wgt.z <- y2z(boys$wgt, boys$age, sex="M", ref=nl4.wgt) postscript(paste(dir,"hgtwgt1.eps",sep="")) par(mar=c(4,4,1,1), col="darkblue") plot(hgt.z, wgt.z) dev.off() # standard set of Z-scores of weight for all tabulated ages, boys & girls sds <- c(-2.5, -2, -1, 0, 1, 2, 2.5) age <- nl4.wgt$x z <- rep(sds, times=length(age)) x <- rep(age, each=length(sds)) sex <- rep(c("M","F"), each=length(z)/2) w <- z2y(z=z, x=x, sex=sex, ref=nl4.wgt) w <- matrix(w, ncol=length(sds), byrow=TRUE) dimnames(w) <- list(age, sds) postscript(paste(dir,"wfa1.eps",sep="")) par(mar=c(4,4,1,1), col="darkblue") matplot(x=age[1:69], y=w[1:69,], xlim=c(0,21), type="l", lty=1, lwd=2, col=c("red","green","gray","blue","gray","green","red"), xlab="Age (years)", ylab="Weight (kg)") dev.off() # interpolate standard to days days <- 0:15 sds <- c(-2, 0, +2) z <- rep(sds, length(days)) x <- rep(round(days/365.25,4), each=length(sds)) w <- z2y(z, x, sex="M", ref=nl4.hgt) w <- matrix(w, ncol=length(sds), byrow=TRUE) dimnames(w) <- list(days, sds) w # --- SESSION 3 # --- DISTRIBUTIONS par(old.par) par(cex=1.5, lwd=1.2, col="darkblue") dir <- path.expand("~/Documents/Sync/Groeistat/NIHES/2012/Slides/Session3Figures/") lwd <- 1.5 cex <- 1.2 # boys distribution 10 years olds nl4.hgt[46:48,] m <- nl4.hgt[47,"M"] m sd <- nl4.hgt[47,"S"]*m sd # normal distribution y <- 120:170 d <- dNO(y, mu=m, sigma=sd) # NOTE: use dNO, alternative to dnorm postscript(paste(dir,"f_dnorm.eps",sep="")) par(lwd=lwd, cex=cex) plot(y, d, type = 'l', col = 'blue', xlab="Measurement",ylab="Density") dev.off() # cumulative normal distribution y <- 120:170 p <- pNO(y, mu=m, sigma=sd) postscript(paste(dir,"f_pnorm.eps",sep="")) par(lwd=lwd, cex=cex) plot(y, p, type = 'l', col = 'blue', xlab="Measurement", ylab="Probability") dev.off() # normal quantiles p <- seq(0.001, 0.999, by =0.001) y <- qNO(p) # NOTE: uses qNO of gamlss instead of qnorm postscript(paste(dir,"f_qnorm.eps",sep="")) par(lwd=lwd, cex=cex) plot(p, y, type = 'l', col = 'blue', xlab="Probability", ylab="Measurement") dev.off() # with data boys <- boys7482 y <- boys[boys$age>9.5 & boys$age<10.5,"hgt"] postscript(paste(dir,"f_hnorm1.eps",sep="")) par(lwd=lwd, cex=cex) hist(y, xlab="Height (cm)", main="") dev.off() # play with histogram postscript(paste(dir,"f_hnorm2.eps",sep="")) par(lwd=lwd, cex=cex) b <- seq(120,166,2) hist(y, xlab="Height (cm)", main="", breaks=b, col='gray', border='white') dev.off() # add normal references postscript(paste(dir,"f_hnorm3.eps",sep="")) par(lwd=lwd, cex=cex) hist(y, xlab="Height (cm)", main="", breaks=b, col='gray', border='white',freq=F) lines(120:166, dNO(120:166,mu=m,sig=sd), col = 'blue', lwd = 2) dev.off() # postscript(paste(dir,"f_hnorm4.eps",sep="")) par(lwd=lwd, cex=cex) b <- seq(120,166,2) hist(y, xlab="Height (cm)", main="", breaks=b, col='gray', border='white',freq=F) lines(120:166, dNO(120:166,mu=m,sig=sd), col = 'blue') lines(120:166, dNO(120:166,mu=140.8,sig=0.0454*140.8), col = 'red', lty=2) lines(120:166, 
dNO(120:166,mu=145.7,sig=0.0465*145.7), col = 'red', lty=2) dev.off() # empirical cumulative distribution y <- boys[boys$age>9.5 & boys$age<10.5,"hgt"] n <- length(y) p <- ((1:n) - 0.5) / n # Empirical probabilities postscript(paste(dir,"f_cumnorm.eps",sep="")) par(lwd=lwd, cex=cex) plot(sort(y), p, type = 's', col = 'red', xlab="Height (cm)") dev.off() # adding the theoretical cumulative distribution n <- length(y) p <- ((1:n) - 0.5) / n # Empirical probabilities postscript(paste(dir,"f_cumnorm2.eps",sep="")) par(lwd=lwd, cex=cex) plot(sort(y), p, type = 's', col = 'red', xlab="Height (cm)") lines(120:170, pNO(120:170,m=m,sigma=sd), col = 'blue') dev.off() # Box-Cox transformation y <- 20:70 postscript(paste(dir,"f_bc1.eps",sep="")) par(lwd=lwd, cex=cex) plot(function(y) dBCCG(y, mu=34,sigma=.16,nu=-1), 20, 70, ylab="Density", xlab="Weight (kg)", col="red") dev.off() # vary the nu parameter postscript(paste(dir,"f_bc2.eps",sep="")) par(lwd=lwd, cex=cex) plot(function(y) dBCCG(y, mu=34,sigma=.16,nu=-1), 20, 70, ylab="Density", xlab="Weight (kg)", col="red") lines(x=y, y=dBCCG(y, mu=34,sigma=.16,nu=0),col="green") lines(x=y, y=dBCCG(y, mu=34,sigma=.16,nu=1),col="black") lines(x=y, y=dBCCG(y, mu=34,sigma=.16,nu=2),col="blue") dev.off() # now with data y <- boys[boys$age>9.5 & boys$age<10.5,"wgt"] b <- seq(20,70,2) postscript(paste(dir,"f_bc3.eps",sep="")) par(lwd=lwd, cex=cex) hist(y, xlab="Weight (kg)", main="", breaks=b, col='gray', border='white') dev.off() y <- sort(y) # add postscript(paste(dir,"f_bc4.eps",sep="")) par(lwd=lwd, cex=cex) hist(y, xlab="Weight (kg)", main="", breaks=b, col='gray', border='white', freq=FALSE) lines(x=y, y=dBCCG(y, mu=33.8,sigma=.162,nu=0.162),col="blue",lwd=2) dev.off() # LMS method # BCCG # BCT # BCPE # free distribution # --- modelling four moments # GAMLSS framework # crosstable linear/smooth, homo/hetero # linear model, homoskedastic (parametric) # linear model, heteroskedastic (parametric) # smooth model, homoskedastic # smooth model, smooth heteroskedastic # third and fourth moments # --- SESSION 6 # --- DIAGNOSTICS par(old.par) par(cex=1.5, lwd=2, col="darkblue") dir <- path.expand("~/Documents/Sync/Groeistat/NIHES/2012/Slides/Session6Figures/") lwd <- 2 cex <- 1.4 # if necessary first download file, and unzip to get to figures.R and the data #err <- download.file(url="http://www.stefvanbuuren.nl/wormplot/code%20and%20data.zip", # destfile="S:\\projecten\\a-i\\groeistat\\NIHES\\2010\\R\\download.zip") library(gamlss) # fitted.plot data(abdom) abd9 <- gamlss( y~cs(x,df=3), sigma.formula=~cs(x,df=3), nu.formula=~1, tau.fomula=~1, family=BCT, data=abdom) abd10 <- gamlss( y~cs(x,df=1), sigma.formula=~cs(x,df=1), nu.formula=~1, tau.fomula=~1, family=BCT, data=abdom) postscript(paste(dir,"fitted.eps",sep="")) par(lwd=1.7, cex.lab=2) fittedPlot(abd9,abd10,x=abdom$x) dev.off() # plot points and centiles postscript(paste(dir,"cent.eps",sep="")) par(lwd=1.7, cex.lab=2) centiles(abd9,xvar=abdom$x,xlab="Gestational age (week)",ylab="Circumference (cm)") dev.off() # empirical quantiles and fitted quantiles # breaks <- nl4.hfa$age[1:69] # calculate solution to demo worm plot library(AGD) data <- boys7482 data <- na.omit(data[,c("age","hgt","wgt")]) f0051 <- gamlss(hgt~cs(age,df=5,c.spar=c(-1.5,2.5)), sigma.formula=~cs(age,df=1,c.spar=c(-1.5,2.5)), data=data,family=NO, control=gamlss.control(n.cyc=3)) f0101 <- gamlss(hgt~cs(age,df=10,c.spar=c(-1.5,2.5)), sigma.formula=~cs(age,df=1,c.spar=c(-1.5,2.5)), data=data,family=NO, control=gamlss.control(n.cyc=3)) f0151 <- 
gamlss(hgt~cs(age,df=15,c.spar=c(-1.5,2.5)), sigma.formula=~cs(age,df=1,c.spar=c(-1.5,2.5)), data=data,family=NO, control=gamlss.control(n.cyc=3)) f0154 <- gamlss(hgt~cs(age,df=15,c.spar=c(-1.5,2.5)), sigma.formula=~cs(age,df=4,c.spar=c(-1.5,2.5)), data=data,family=NO, control=gamlss.control(n.cyc=3)) # use transformed age t.age <- fitted(lm(data$age~fitted(f0154))) t.age <- t.age - min(t.age) data.t <- data.frame(data,t.age=t.age) f0051r <- gamlss(hgt~cs(t.age,df=5,c.spar=c(-1.5,2.5)), sigma.formula=~cs(t.age,df=1,c.spar=c(-1.5,2.5)), data=data.t,family=NO, control=gamlss.control(n.cyc=2)) f0091r <- gamlss(hgt~cs(t.age,df=9,c.spar=c(-1.5,2.5)), sigma.formula=~cs(t.age,df=1,c.spar=c(-1.5,2.5)), data=data.t,family=NO, control=gamlss.control(n.cyc=3)) f0101r <- gamlss(hgt~cs(t.age,df=10,c.spar=c(-1.5,2.5)), sigma.formula=~cs(t.age,df=1,c.spar=c(-1.5,2.5)), data=data.t,family=NO, control=gamlss.control(n.cyc=2)) f0111r <- gamlss(hgt~cs(t.age,df=11,c.spar=c(-1.5,2.5)), sigma.formula=~cs(t.age,df=1,c.spar=c(-1.5,2.5)), data=data.t,family=NO, control=gamlss.control(n.cyc=3)) f0105r <- gamlss(hgt~cs(t.age,df=10,c.spar=c(-1.5,2.5)), sigma.formula=~cs(t.age,df=5,c.spar=c(-1.5,2.5)), data=data.t,family=NO, control=gamlss.control(n.cyc=3)) f0106r <- gamlss(hgt~cs(t.age,df=10,c.spar=c(-1.5,2.5)), sigma.formula=~cs(t.age,df=6,c.spar=c(-1.5,2.5)), data=data.t,family=NO, control=gamlss.control(n.cyc=3)) f0107r <- gamlss(hgt~cs(t.age,df=10,c.spar=c(-1.5,2.5)), sigma.formula=~cs(t.age,df=7,c.spar=c(-1.5,2.5)), data=data.t,family=NO, control=gamlss.control(n.cyc=3)) f1106r <- gamlss(hgt~cs(t.age,df=10,c.spar=c(-1.5,2.5)), sigma.formula=~cs(t.age,df=6,c.spar=c(-1.5,2.5)), nu.formula=~cs(t.age,df=1,c.spar=c(-1.5,2.5)), data=data.t,family=BCCG, control=gamlss.control(n.cyc=3)) f4106r <- gamlss(hgt~cs(t.age,df=10,c.spar=c(-1.5,2.5)), sigma.formula=~cs(t.age,df=6,c.spar=c(-1.5,2.5)), nu.formula=~cs(t.age,df=4,c.spar=c(-1.5,2.5)), data=data.t,family=BCCG, control=gamlss.control(n.cyc=3)) # utility functions store <- function(..., path=.store){ # stores object(s) in storage directory path (SvB May 2008) names <- as.character(substitute(list(...)))[-1] for (i in 1:length(names)){ name <- names[i] file <- paste(path, name, sep="") file.access save(list=name,file=file) } } fetch <- function(..., path=.store){ # fetches the object from the storage directory in path (SvB May 2008) names <- as.character(substitute(list(...)))[-1] for (i in 1:length(names)){ name <- names[i] file <- paste(path, name, sep="") if (file.access(file,0)== -1) cat(paste("Warning: No file",file,"\n")) else load(file,.GlobalEnv) } } .store <- path.expand("~/Documents/Sync/Groeistat/NIHES/2010/R/store/") # store(f0051,f0101,f0151,f0154,f0051r, f0091r, f0101r, # f0111r, f0105r, f0106r, f0107r, f1106r, f4106r) fetch(f0051,f0101,f0151,f0154,f0051r, f0091r, f0101r, f0111r, f0105r, f0106r, f0107r, f1106r, f4106r) postscript(paste(dir,"c0051.eps",sep="")) centiles(f0051,xvar=data$age,main="0051",pch='.') dev.off() postscript(paste(dir,"c0101.eps",sep="")) centiles(f0101,xvar=data$age,main="0101",pch='.') dev.off() postscript(paste(dir,"c0151.eps",sep="")) centiles(f0151,xvar=data$age,main="0151",pch='.') dev.off() postscript(paste(dir,"c0051r.eps",sep="")) centiles(f0051r,xvar=data$age,main="0051r",pch='.') dev.off() postscript(paste(dir,"c0106r.eps",sep="")) centiles(f0106r,xvar=data$age,main="0106r",pch='.') dev.off() postscript(paste(dir,"c4106r.eps",sep="")) centiles(f4106r,xvar=data$age,main="4106r",pch='.') dev.off() 
postscript(paste(dir,"w0051.eps",sep="")) wp.twin(f0051,cex=0.5) dev.off() postscript(paste(dir,"w0101.eps",sep="")) wp.twin(f0051,f0101,cex=0.5) dev.off() postscript(paste(dir,"w0151.eps",sep="")) wp.twin(f0101,f0151,cex=0.5) dev.off() postscript(paste(dir,"w0051r.eps",sep="")) wp.twin(f0051,f0051r,cex=0.5) dev.off() postscript(paste(dir,"w0101r.eps",sep="")) wp.twin(f0051r,f0101r,cex=0.5) dev.off() postscript(paste(dir,"w0101r2.eps",sep="")) wp.twin(f0091r,f0101r,cex=0.5) dev.off() postscript(paste(dir,"w0111r.eps",sep="")) wp.twin(f0101r,f0111r,cex=0.5) dev.off() postscript(paste(dir,"w0106r.eps",sep="")) wp.twin(f0101r,f0106r,cex=0.5) dev.off() postscript(paste(dir,"w0106r2.eps",sep="")) wp.twin(f0105r,f0106r,cex=0.5) dev.off() postscript(paste(dir,"w0107r.eps",sep="")) wp.twin(f0106r,f0107r,cex=0.5) dev.off() postscript(paste(dir,"w4106r.eps",sep="")) wp.twin(f0106r,f4106r,cex=0.5) dev.off() # store(f0051,f0101,f0151,f0154,f0051r, f0091r, f0101r, # f0111r, f0105r, f0106r, f0107r, f1106r, f4106r) # #f0164 <- gamlss(hgt~cs(age,df=15), # sigma.formula=~cs(age,df=4), # data=data, family=NO, # control=gamlss.control(n.cyc=3)) #par(lwd=1) #wp.twin(fit3,cex=0.5) # --- SESSION 8 # further tricks, outlook par(cex=1.5, lwd=2, col="darkblue") dir <- path.expand("~/Documents/Sync/Groeistat/NIHES/2012/Slides/Session8Figures/") lwd <- 2 cex <- 1.4 # age transformation library(AGD) data <- boys7482 data <- na.omit(data[,c("age","hgt","wgt")]) fit1 <- gamlss(hgt~cs(age,df=16), sigma.formula=~cs(age,df=4), data=data, family=NO, control=gamlss.control(n.cyc=5)) t.age <- fitted(lm(data$age~fitted(fit1))) postscript(paste(dir,"agetra.eps",sep="")) par(lwd=1.7, cex.lab=2) plot(data$age, t.age, type='l',lwd=2,xlab="Age", ylab="Transformed age", col="blue",ylim=c(0,20),xlim=c(0,20)) abline(0,1,lwd=1) dev.off() # exporting a GAMLSS object to an LMS table data2 <- cbind(data, t.age) lms <- extractLMS(f0106r, data2) tail(lms)
## ---- end of file: AGD/inst/doc/course.r ----
#########################################################################
#
# Package: AGHmatrix
#
# File: Amatrix.R
# Contains: Amatrix
#
# Written by Rodrigo Rampazo Amadeu
#
# First version: Feb-2014
# Last update: 20-Mar-2019
# License: GPL-3
#
#########################################################################

#' Construction of Relationship Matrix A
#'
#' Creates an additive relationship matrix A from pedigree data in a 3-column format, based on the ploidy level (an even number) and, for autopolyploids, on the proportion of parental gametes that are IBD (identical by descent) due to double reduction. Returns a dominance relationship matrix if `dominance=TRUE` (ploidy 2 only). Autopolyploid matrices are based on Kerr (2012), used when the `ploidy` argument is higher than `2` and `dominance=FALSE`.
#' The diploid additive numerator relationship matrix is built as in Henderson (1976), used when `ploidy=2` and `dominance=FALSE`. The diploid dominance numerator relationship matrix is built as in Cockerham (1954), used when `ploidy=2` and `dominance=TRUE`. For details of the recursive method see Mrode (2014).
#'
#' @param data pedigree data name (3-column format). Unknown values should be equal to 0.
#' @param ploidy an even number (default=2).
#' @param w proportion of parental gametes IBD due to double reduction (default=0); used when ploidy > 2.
#' @param verify verifies the pedigree file for conflicting entries (default=TRUE).
#' @param dominance if TRUE, returns the dominance relationship matrix
#' @param slater if TRUE, returns the additive autotetraploid relationship matrix as in Slater (2013)
#' @param ASV if TRUE, transform matrix into average semivariance (ASV) equivalent (K = K / (trace(K) / (nrow(K)-1))). Details in formula 2 of Feldmann et al. (2022). Default = FALSE.
#' @param ... arguments to be passed to datatreat()
#'
#' @return Matrix with the relationship between the individuals.
#'
#' @examples
#' data(ped.mrode)
#' #Computing additive relationship matrix considering diploidy (Henderson 1976):
#' Amatrix(ped.mrode, ploidy=2)
#' #Computing non-additive relationship matrix considering diploidy (Cockerham 1954):
#' Amatrix(ped.mrode, ploidy=2, dominance=TRUE)
#' #Computing additive relationship matrix considering autotetraploidy (Kerr 2012):
#' Amatrix(ped.mrode, ploidy=4)
#' #Computing additive relationship matrix considering autooctaploidy (Kerr 2012):
#' Amatrix(ped.mrode, ploidy=8)
#' #Computing additive relationship matrix considering autotetraploidy and double-
#' #reduction of 0.1 (Kerr 2012):
#' Amatrix(ped.mrode, ploidy=4, w=0.1)
#' #Computing additive relationship matrix considering
#' #autotetraploidy and double-reduction of 0.1 (Slater 2014):
#' Amatrix(ped.mrode, ploidy=4, w=0.1, slater = TRUE)
#' #Computing additive relationship matrix considering autohexaploidy and double-
#' #reduction of 0.1 (Kerr 2012):
#' Amatrix(ped.mrode, ploidy=6, w=0.1)
#'
#' @author Rodrigo R Amadeu, \email{rramadeu@@gmail.com}
#' @references \emph{Cockerham, CC. 1954. An extension of the concept of partitioning hereditary variance for analysis of covariances among relatives when epistasis is present. Genetics 39, 859-882}
#' @references \emph{Feldmann MJ, et al. 2022. Average semivariance directly yields accurate estimates of the genomic variance in complex trait analyses. G3 (Bethesda), 12(6).}
#' @references \emph{Henderson, CR. 1976. A simple method for computing the inverse of a numerator relationship matrix used in prediction of breeding values. Biometrics 32, 69-83}
#' @references \emph{Kerr, RJ, et al. 2012. Use of the numerator relationship matrix in genetic analysis of autopolyploid species. Theoretical and Applied Genetics 124, 1271-1282}
#' @references \emph{Mrode, RA. 2014. Chapter 2: Genetic Covariance Between Relatives and Chapter 9: Non-additive Animal Models in Mrode, RA. 2014. Linear models for the prediction of animal breeding values. Cabi, 3rd edition.}
#' @references \emph{Slater, AT, et al. 2013. Improving the analysis of low heritability complex traits for enhanced genetic gain in potato. Theoretical and Applied Genetics 127, 809-820}
#'
#' @export
Amatrix <- function(data = NULL, ploidy=2, w=0, verify=TRUE,
                    dominance=FALSE, slater=FALSE, ASV=FALSE, ...){
  if(ploidy%%2!=0)
    stop(deparse("Ploidy should be an even number"))
  if(ploidy!=2 & dominance)
    stop(deparse("Dominance relationship matrix is implemented only for ploidy=2"))
  if(is.null(data))
    stop(deparse("Please define the variable data"))
  unk=0
  cat("Verifying conflicting data... \n")
  flag <- verifyped(data)
  if(flag)
    stop(deparse("Please double-check your data and try again"))
  cat("Organizing data... \n")
  orig.order <- as.character(data[,1])
  data.after.treat <- try(datatreat(data=data,unk=unk,...), silent=TRUE)

  # checking if order was fixed
  flag = FALSE
  flag = inherits(data.after.treat,"try-error")
  if(!flag)
    flag = (length(unique(data.after.treat$ind.data))!=nrow(data))
  if(flag){
    cat("It wasn't possible to organize the data in a fast way... \n")
    cat("Trying to organize in a slow (naive) way... \n")
    data.sorted <- sortped(data)
    data.after.treat <- try(datatreat(data=data.sorted,unk=unk,...))

    # checking if order was fixed
    flag = FALSE
    flag = inherits(data.after.treat,"try-error")
    if(!flag)
      flag = (length(unique(data.after.treat$ind.data))!=nrow(data))
    if(flag){
      cat("It wasn't possible to organize your data chronologically. We recommend you to do it by hand or use the flag 'naive_sort=TRUE'. If the problem persists, please contact this package maintainer \n")
      return()
    }
  }
  data <- data.after.treat
  s <- data$sire
  d <- data$dire
  if( is.null(s) || is.null(d) )
    stop(deparse("Please define the variable s (sire) and/or d (dire)"))
  if( length(s) != length(d) )
    stop(deparse("Please verify the variable s (sire) and/or d (dire), they don't have the same length"))
  if( !is.numeric(s) || !is.numeric(d) )
    stop(deparse("Please verify your data, it has to be 2 numeric vectors"))
  if( length(data$sire) > 1000 ) cat("Processing a large pedigree data... It may take a couple of minutes...
\n") n <- length(s) A <- matrix(NA,ncol=n,nrow=n) Time = proc.time() #### For ploidy 2 #### if(ploidy == 2){ w <- NA cat("Constructing matrix A using ploidy = 2 \n") A[1,1] <- 1 for( i in 2:n){ ## Both are unknown if( s[i] == 0 && d[i] == 0 ){ A[i,i] <- 1 for( j in 1:(i-1)) A[j,i] <- A[i,j] <- 0 } ## Sire is unknown if( s[i] == 0 && d[i] != 0 ){ A[i,i] <- 1 for( j in 1:(i-1)) A[j,i] <- A[i,j] <- 0.5*(A[j,d[i]]) } ## Dire is unknown if( d[i] == 0 && s[i] != 0 ){ A[i,i] <- 1 for( j in 1:(i-1)) A[j,i] <- A[i,j] <- 0.5*(A[j,s[i]]) } ## Both are known if( d[i] != 0 && s[i] != 0 ){ A[i,i] <- 1+0.5*(A[d[i],s[i]]) for( j in 1:(i-1)) A[j,i] <- A[i,j] <- 0.5*(A[j,s[i]]+A[j,d[i]]) } } } if(dominance){ cat("Constructing dominance relationship matrix \n") D <- matrix(NA,ncol=n,nrow=n) for(i in 1:n){ for(j in 1:n){ u1 <- ifelse(length(A[s[i],s[j]])>0,A[s[i],s[j]],0) u2 <- ifelse(length(A[d[i],d[j]])>0,A[d[i],d[j]],0) u3 <- ifelse(length(A[s[i],d[j]])>0,A[s[i],d[j]],0) u4 <- ifelse(length(A[s[j],d[i]])>0,A[s[j],d[i]],0) D[i,j] <- D[j,i] <- 0.25*(u1*u2+u3*u4) } } diag(D)<-1 A<-D D<-NULL } #### For ploidy 4 #### if(slater==TRUE){ listA <- list() cat(paste("Constructing matrix A using ploidy = 4 and proportion of double reduction =",w,";as in Slater et al. (2014) \n")) start.time <- Sys.time() A[1,1] <- (1+w)/4 for( i in 2:n){ ## Both are unknown if( s[i] == 0 && d[i] == 0 ){ A[i,i] <- (1+w)/4 for( j in 1:(i-1)) A[j,i] <- A[i,j] <- 0 } ## Sire is unknown if( s[i] == 0 && d[i] != 0 ){ A[i,i] <- (5 + 7*w + 4*A[d[i],d[i]]*(1-w) ) / 24 for( j in 1:(i-1)) A[j,i] <- A[i,j] <- 0.5*(A[j,d[i]]) } ## Dire is unknown if( d[i] == 0 && s[i] != 0 ){ A[i,i] <- (5 + 8*w + 4*A[s[i],s[i]]*(1-w) ) / 24 ##On Slater in 7w, deriving from hand based on Kerr is 8w for( j in 1:(i-1)) A[j,i] <- A[i,j] <- 0.5*(A[j,s[i]]) } ## Both are known if( d[i] != 0 && s[i] != 0 ){ A[i,i] <- (1 + 2*w + (1-w)*(A[s[i],s[i]]) + (1-w)*(A[d[i],d[i]]) + 3*A[s[i],d[i]] ) / 6 for( j in 1:(i-1)) A[j,i] <- A[i,j] <- 0.5*(A[j,s[i]]+A[j,d[i]]) } } A <- 4*A } if(slater==FALSE && ploidy>2){ ## It does not use double-reduction proportion, need to double-check formula on kerr 2012 for higher ploidies... listA <- list() cat(paste("Constructing matrix A using ploidy =",ploidy,"and proportion of double reduction =",w,";as in Kerr et al. (2012) \n")) start.time <- Sys.time() v = ploidy/2 A[1,1] <- (1)/(2*v) for( i in 2:n){ ## Both are unknown if( s[i] == 0 && d[i] == 0 ){ A[i,i] <- (1)/(2*v) for( j in 1:(i-1)) A[j,i] <- A[i,j] <- 0 } ## Sire is unknown if( s[i] == 0 && d[i] != 0 ){ A[i,i] <- (1 + (v-1)*w + ((v-1)*(1-w)*(v*A[d[i],d[i]] + 1/2 - 1))/(2*v-1))/(2*v) for( j in 1:(i-1)) A[j,i] <- A[i,j] <- 0.5*(A[j,d[i]]) } ## Dire is unknown if( d[i] == 0 && s[i] != 0 ){ A[i,i] <- (1 + (v-1)*w + ((v-1)*(1-w)*(v*A[s[i],s[i]] + 1/2 - 1))/(2*v-1))/(2*v) for( j in 1:(i-1)) A[j,i] <- A[i,j] <- 0.5*(A[j,s[i]]) } ## Both are known if( d[i] != 0 && s[i] != 0 ){ A[i,i] <- (1 + (v-1)*w + ((v-1)*(1-w)*(v*A[d[i],d[i]] + v*A[s[i],s[i]] - 1)/(2*v-1)))/(2*v) + A[d[i],s[i]]/2 for( j in 1:(i-1)) A[j,i] <- A[i,j] <- 0.5*(A[j,s[i]]+A[j,d[i]]) } } A <- 2*v*A } NA.errors <- which(is.na(A)) if( length(NA.errors) > 0 ) cat("Please verify your original data with the function 'verifyped', there are some data missing/conflicting data \n") Time = as.matrix(proc.time()-Time) cat("Completed! Time =", Time[3]/60," minutes \n") # "Visualization options: (matrix, w) \n ") rownames(A) <- colnames(A) <- data$ind.data A <- A[orig.order,orig.order] if (ASV) { A = get_ASV(A) } return(A) }
## ---- end of file: AGHmatrix/R/Amatrix.R ----
#########################################################################
#
# Package: AGHmatrix
#
# File: AmatrixPolyCross.R
# Contains: AmatrixPolyCross
#
# Written by Rodrigo Rampazo Amadeu
# Contributors: Leticia AC Lara
#
# First version: 20-Nov-2020
# Last update: 05-Aug-2021
# License: GPL-3
#
#########################################################################

#' Construction of pedigree-based relationship matrix with parental guessing possibility
#'
#' Creates an additive relationship matrix A based on a non-deterministic pedigree with 4+ columns,
#' where each column represents a possible parent. This function was built with the following
#' designs in mind.
#' 1) A mating design where you have equally possible parents. For example, a generation of
#' insects derived from the mating of three insects in a cage. All the insects in this generation
#' will have the same expected relatedness with all the possible parents (1/3). If there are only
#' two parents in the cage, the function assumes no inbreeding and the pedigree is deterministic
#' (the individual is offspring of the cross between the two parents). Another example: a
#' population of 10 open-pollinated plants where you harvest the seeds without tracking the mother.
#' 2) When fixedParent is TRUE: a mating design where you know one parent and might know the
#' other possible parents. For example, a polycross design where you have seeds harvested from a
#' mother plant and possible pollen donors.
#'
#' @param data pedigree data name. Unknown values should be equal to 0. See example for construction.
#' @param fixedParent if FALSE, assumes that all the parents are equally possible parents. If TRUE,
#' assumes that the first parent is known and the others are equally possible parents. Default = FALSE.
#'
#' @return Matrix with the relationship between the individuals.
#'
#' @examples
#' #the following pedigree has the id of the individual followed by possible parents
#' #if 0 is unknown
#' #the possible parents are filled from left to right
#' #in the pedigree data frame examples:
#' #id 1,2,3,4 have unknown parents and are assumed unrelated
#' #id 5 has three possible parents (1,2,3)
#' #id 6 has three possible parents (2,3,4)
#' #id 7 has two parents (deterministic case here, the parents are 3 and 4)
#' #id 8 has four possible parents (5,6,7,1)
#'
#' pedigree = data.frame(id=1:8,
#'                       parent1 = c(0,0,0,0,1,2,3,5),
#'                       parent2 = c(0,0,0,0,2,3,4,6),
#'                       parent3 = c(0,0,0,0,3,4,0,7),
#'                       parent4 = c(0,0,0,0,0,0,0,1),
#'                       parent5 = 0)
#'
#' print(pedigree)
#'
#' AmatrixPolyCross(pedigree)
#'
#' #when fixedParent is set to be TRUE:
#' #id 5 is offspring of parent 1 in a deterministic way and two other possible parents (2,3)
#' #id 6 is offspring of parent 2 in a deterministic way and two other possible parents (3,4)
#' #id 7 has two parents (deterministic case here, the parents are 3 and 4); as before
#' #id 8 is offspring of parent 5 in a deterministic way and has three other possible parents (6,7,1)
#'
#' AmatrixPolyCross(pedigree,fixedParent=TRUE)
#'
#' @author Rodrigo R Amadeu, \email{rramadeu@@gmail.com}
#'
#' @export
#'
AmatrixPolyCross = function(data = NULL, fixedParent=FALSE){
  unk = 0
  orig.order <- as.character(data[, 1])
  data1 <- AGHmatrix::datatreat(data = data[,c(1,2,3)], unk = unk)
  Parents <- data1$sire
  Parents <- rbind(Parents, data1$dire)
  for(i in 4:ncol(data))
    Parents <- rbind(Parents, AGHmatrix::datatreat(data = data[,c(1,2,i)], unk = unk)$dire)
  for(i in 1:ncol(Parents)){
    tst = which(Parents[,i]!=0)
    if(length(tst)>0 && any(Parents[,i]==0))  # guard added: min() of an empty set would warn
      if(min(which(Parents[,i]==0)) < max(tst))
        stop(deparse(paste0("Check parent order of line ", i,
                            ", missing (or unused) values should be located at the right!")))
  }
  if (nrow(data) > 1000)  # the original tested length(data$sire), which is always 0 for this input
    cat("Processing a large pedigree data... It may take a couple of minutes...
\n") if(fixedParent==FALSE){ n <- ncol(Parents) Time = proc.time() combs = NULL for(i in 2:nrow(Parents)){ combs = cbind(combs, combn(i,2)) } combs = t(unique(t(combs))) #fixing order by number of max parents cat("Constructing matrix A using ploidy = 2 \n") Afinal = matrix(NA,n,n) Afinal[1, 1] <- 1 Acombs <- array(NA, dim=c(n,n,ncol(combs))) for (i in 2:n) { for(combsIndex in 1:ncol(combs)){ A <- Afinal s <- Parents[combs[1,combsIndex],] d <- Parents[combs[2,combsIndex],] if (s[i] == 0 && d[i] == 0) { A[i, i] <- 1 for (j in 1:(i - 1)) A[j, i] <- A[i, j] <- 0 } if (s[i] == 0 && d[i] != 0) { A[i, i] <- 1 for (j in 1:(i - 1)) A[j, i] <- A[i, j] <- 0.5 * (A[j, d[i]]) } if (d[i] == 0 && s[i] != 0) { A[i, i] <- 1 for (j in 1:(i - 1)) A[j, i] <- A[i, j] <- 0.5 * (A[j, s[i]]) } if (d[i] != 0 && s[i] != 0) { A[i, i] <- 1 + 0.5 * (A[d[i], s[i]]) for (j in 1:(i - 1)) A[j, i] <- A[i, j] <- 0.5 * (A[j, s[i]] + A[j, d[i]]) } Acombs[1:i,1:i,combsIndex] = A[1:i,1:i] } ## Total parents totalParents = length(which(Parents[,i]!=0)) if(totalParents < 3){ Afinal[i,] = Afinal[,i] = Acombs[i,,1] }else{ Afinal[i,] = Afinal[,i] = apply(Acombs[i,,(1:choose(totalParents,2))],1,mean) } } A = Afinal } if(fixedParent==TRUE){ n <- ncol(Parents) Time = proc.time() Afinal = matrix(NA,n,n) Afinal[1, 1] <- 1 Acombs <- array(NA, dim=c(n,n,(nrow(Parents)-1))) s <- Parents[1,] for (i in 2:n) { for(combsIndex in 1:(nrow(Parents)-1)){ A <- Afinal d <- Parents[combsIndex+1,] if (s[i] == 0 && d[i] == 0) { A[i, i] <- 1 for (j in 1:(i - 1)) A[j, i] <- A[i, j] <- 0 } if (s[i] == 0 && d[i] != 0) { A[i, i] <- 1 for (j in 1:(i - 1)) A[j, i] <- A[i, j] <- 0.5 * (A[j, d[i]]) } if (d[i] == 0 && s[i] != 0) { A[i, i] <- 1 for (j in 1:(i - 1)) A[j, i] <- A[i, j] <- 0.5 * (A[j, s[i]]) } if (d[i] != 0 && s[i] != 0) { A[i, i] <- 1 + 0.5 * (A[d[i], s[i]]) for (j in 1:(i - 1)) A[j, i] <- A[i, j] <- 0.5 * (A[j, s[i]] + A[j, d[i]]) } Acombs[1:i,1:i,combsIndex] = A[1:i,1:i] } ## Total parents totalParents = length(which(Parents[,i]!=0)) if(totalParents < 3){ Afinal[i,] = Afinal[,i] = Acombs[i,,1] }else{ Afinal[i,] = Afinal[,i] = apply(Acombs[i,,1:(totalParents-1)],1,mean) } } A = Afinal } Time = as.matrix(proc.time() - Time) cat("Completed! Time =", Time[3]/60, " minutes \n") rownames(A) <- colnames(A) <- data1$ind.data A <- A[orig.order, orig.order] return(A) }
## ---- end of file: AGHmatrix/R/AmatrixPolyCross.R ----
#####################################################################
#
# Package: AGHmatrix
#
# File: Gmatrix.R
# Contains: Gmatrix slater_par check_Gmatrix_data
#
# Written by Rodrigo Rampazo Amadeu
# Contributors: Marcio Resende Jr, Leticia AC Lara, Ivone Oliveira, Luis Felipe V Ferrao
#
# First version: Feb-2014
# Last update: 05-Aug-2021
# License: GPL-3
#
#####################################################################

#' Construction of Relationship Matrix G
#'
#' Given a matrix (individuals x markers), a method, a missing value, and a maf threshold, returns an additive or non-additive relationship matrix. For diploids, the methods "Yang" and "VanRaden" for additive relationship matrices, and "Su" and "Vitezica" for non-additive relationship matrices, are implemented. For autopolyploids, the method "VanRaden" for additive relationship, the method "Slater" for the full-autopolyploid model including non-additive effects, and a pseudo-diploid parametrization are implemented. Weights are implemented for the "VanRaden" method as described in Liu (2020).
#'
#' @param SNPmatrix matrix (n x m), with individuals (n) in rows and markers (m) in columns, with dosages coded as 0, 1, 2, ..., ploidy, and missingValue.
#' @param method "Yang" or "VanRaden" for a marker-based additive relationship matrix. "Su" or "Vitezica" for a marker-based dominance relationship matrix. "Slater" for the full-autopolyploid model including non-additive effects. "Endelman" for the autotetraploid dominant (digenic) relationship matrix. "MarkersMatrix" for a matrix with the amount of shared markers between individuals. Default is "VanRaden"; for autopolyploids a scaled product is computed (similar to Covarrubias-Pazaran, 2016).
#' @param missingValue missing value in data. Default=-9.
#' @param thresh.missing threshold on missing data; SNPs with a missing-data frequency below this value are kept. If equal to 1, no threshold or imputation is applied. Default = 0.50.
#' @param maf minimum allele frequency accepted for each marker. Default=0.
#' @param verify.posdef verify if the resulting matrix is positive-definite. Default=FALSE.
#' @param ploidy data ploidy (an even number between 2 and 20). Default=2.
#' @param pseudo.diploid if TRUE, uses the pseudo-diploid parametrization of Slater (2016).
#' @param ratio if TRUE, molecular data are considered ratios and the scaled product of the matrix is computed (as in the "VanRaden" method).
#' @param impute.method "mean" to impute the missing data by the mean per marker, "mode" to impute the missing data by the mode per marker, "global.mean" to impute the missing data by the mean across all markers, "global.mode" to impute the missing data by the mode across all markers. Default = "mean".
#' @param integer if FALSE, does not check for integer numbers. Default=TRUE.
#' @param ratio.check if TRUE, runs Mcheck with ratio data.
#' @param weights vector with weights for each marker. Only used if method="VanRaden". Default is a vector of 1's (equal weights).
#' @param ploidy.correction sets the denominator (correction) of the crossprod. Used only when ploidy > 2 for "VanRaden" and ratio models. If TRUE, it uses the sum over markers of "Ploidy" times "Frequency" times "(1-Frequency)", as in method 1 of VanRaden (2008) and Endelman (2018). When ratio=TRUE, it uses "1/Ploidy" times "Frequency" times "(1-Frequency)". If FALSE, it uses the sum of the sampling variances of each marker. Default = FALSE.
#' @param rmv.mono if TRUE, monomorphic markers are removed. Default=FALSE.
#' @param thresh.htzy threshold heterozigosity, remove SNPs below this threshold. Default=0. #' @param ASV if TRUE, transform matrix into average semivariance (ASV) equivalent (K = K / (trace(K) / (nrow(K)-1))). Details formula 2 of Fieldmann et al. (2022). Default = FALSE. #' @return Matrix with the marker-bases relationships between the individuals #' #' @examples #' \dontrun{ #' ## Diploid Example #' data(snp.pine) #' #Verifying if data is coded as 0,1,2 and missing value. #' str(snp.pine) #' #Build G matrices #' Gmatrix.Yang <- Gmatrix(snp.pine, method="Yang", missingValue=-9, maf=0.05) #' Gmatrix.VanRaden <- Gmatrix(snp.pine, method="VanRaden", missingValue=-9, maf=0.05) #' Gmatrix.Su <- Gmatrix(snp.pine, method="Su", missingValue=-9, maf=0.05) #' Gmatrix.Vitezica <- Gmatrix(snp.pine, method="Vitezica", missingValue=-9, maf=0.05) #' #' ## Autetraploid example #' data(snp.sol) #' #Build G matrices #' Gmatrix.VanRaden <- Gmatrix(snp.sol, method="VanRaden", ploidy=4) #' Gmatrix.Endelman <- Gmatrix(snp.sol, method="Endelman", ploidy=4) #' Gmatrix.Slater <- Gmatrix(snp.sol, method="Slater", ploidy=4) #' Gmatrix.Pseudodiploid <- Gmatrix(snp.sol, method="VanRaden", ploidy=4, pseudo.diploid=TRUE) #' #' #Build G matrix with weights #' Gmatrix.weighted <- Gmatrix(snp.sol, method="VanRaden", weights = runif(3895,0.001,0.1), ploidy=4) #' } #' #' @author Rodrigo R Amadeu \email{rramadeu@@gmail.com}, Marcio Resende Jr, Letícia AC Lara, Ivone Oliveira, and Felipe V Ferrao #' #' @references \emph{Covarrubias-Pazaran, G. 2016. Genome assisted prediction of quantitative traits using the R package sommer. PLoS ONE 11(6):1-15.} #' @references \emph{Endelman, JB, et al., 2018. Genetic variance partitioning and genome-wide prediction with allele dosage information in autotetraploid potato. Genetics, 209(1) pp. 77-87.} #' @references \emph{Feldmann MJ, et al. 2022. Average semivariance directly yields accurate estimates of the genomic variance in complex trait analyses. G3 (Bethesda), 12(6).} #' @references \emph{Liu, A, et al. 2020. Weighted single-step genomic best linear unbiased prediction integrating variants selected from sequencing data by association and bioinformatics analyses. Genet Sel Evol 52, 48.} #' @references \emph{Slater, AT, et al. 2016. Improving genetic gain with genomic selection in autotetraploid potato. The Plant Genome 9(3), pp.1-15.} #' @references \emph{Su, G, et al. 2012. Estimating additive and non-additive genetic variances and predicting genetic merits using genome-wide dense single nucleotide polymorphism markers. PloS one, 7(9), p.e45293.} #' @references \emph{VanRaden, PM, 2008. Efficient methods to compute genomic predictions. Journal of dairy science, 91(11), pp.4414-4423.} #' @references \emph{Vitezica, ZG, et al. 2013. On the additive and dominant variance and covariance of individuals within the genomic selection scope. Genetics, 195(4), pp.1223-1230.} #' @references \emph{Yang, J, et al. 2010. Common SNPs explain a large proportion of the heritability for human height. 
Nature genetics, 42(7), pp.565-569.} #' #' @export Gmatrix <- function (SNPmatrix = NULL, method = "VanRaden", missingValue = -9, maf = 0, thresh.missing = .50, verify.posdef = FALSE, ploidy=2, pseudo.diploid = FALSE, integer=TRUE, ratio = FALSE, impute.method = "mean", rmv.mono=FALSE, thresh.htzy=0, ratio.check = TRUE, weights = NULL, ploidy.correction = FALSE, ASV=FALSE){ Time = proc.time() markers = colnames(SNPmatrix) if(!is.null(weights)) if(length(weights)!=ncol(SNPmatrix)) stop(deparse("weight should be a numeric vector of the same number of markers in the SNPmatrix")) if(ratio){ #This allows to enter in the scaled crossprod condition method="VanRaden" } if (!is.na(missingValue)) { m <- match(SNPmatrix, missingValue, 0) SNPmatrix[m > 0] <- NA } check_Gmatrix_data(SNPmatrix=SNPmatrix,method=method,ploidy=ploidy,ratio=ratio,integer=integer) NumberMarkers <- ncol(SNPmatrix) nindTotal <- colSums(!is.na(SNPmatrix)) nindAbs <- max(nindTotal) cat("Initial data: \n") cat("\tNumber of Individuals:", max(nindTotal), "\n") cat("\tNumber of Markers:", NumberMarkers, "\n") if(ratio==FALSE){ SNPmatrix <- Mcheck(SNPmatrix, ploidy = ploidy, thresh.maf = maf, rmv.mono = rmv.mono, thresh.htzy = thresh.htzy, thresh.missing = thresh.missing, impute.method = impute.method) } ## Testing ratio check function: not final! if(ratio && ratio.check){ SNPmatrix <- Mcheck(SNPmatrix, ploidy = ploidy, thresh.maf = maf, rmv.mono = rmv.mono, thresh.missing = thresh.missing, impute.method = impute.method) } if(method=="Slater"){ P <- colSums(SNPmatrix,na.rm = TRUE)/nrow(SNPmatrix) SNPmatrix[,which(P>ploidy/2)] <- ploidy-SNPmatrix[,which(P>(ploidy/2))] SNPmatrix <- slater_par(SNPmatrix,ploidy=ploidy) NumberMarkers <- ncol(SNPmatrix) Frequency <- colSums(SNPmatrix,na.rm=TRUE)/nrow(SNPmatrix) FreqP <- matrix(rep(Frequency, each = nrow(SNPmatrix)), ncol = ncol(SNPmatrix)) } if(ploidy==2){ alelleFreq <- function(x, y) { (2 * length(which(x == y)) + length(which(x == 1)))/(2 * length(which(!is.na(x)))) } Frequency <- cbind(apply(SNPmatrix, 2, function(x) alelleFreq(x,0)) , apply(SNPmatrix, 2, function(x) alelleFreq(x, 2))) # if (any(Frequency[, 1] <= maf) & maf != 0) { # cat("\t", length(which(Frequency[, 1] <= maf)), "markers dropped due to maf cutoff of", maf, "\n") # SNPmatrix <- SNPmatrix[,-which(Frequency[, 1] <= maf)] # cat("\t", ncol(SNPmatrix), "markers kept \n") # Frequency <- as.matrix(Frequency[-which(Frequency[,1] <= # maf), ]) # NumberMarkers <- ncol(SNPmatrix) # } FreqP <- matrix(rep(Frequency[, 2], each = nrow(SNPmatrix)), ncol = ncol(SNPmatrix)) } if(ploidy>2 && pseudo.diploid){## Uses Pseudodiploid model P <- colSums(SNPmatrix,na.rm = TRUE)/nrow(SNPmatrix) SNPmatrix[,which(P>ploidy/2)] <- ploidy-SNPmatrix[,which(P>(ploidy/2))] Frequency <- colSums(SNPmatrix,na.rm=TRUE)/(ploidy*nrow(SNPmatrix)) Frequency <- cbind(1-Frequency,Frequency) FreqP <- matrix(rep(Frequency[, 2], each = nrow(SNPmatrix)), ncol = ncol(SNPmatrix)) SNPmatrix[SNPmatrix %in% c(1:(ploidy-1))] <- 1 SNPmatrix[SNPmatrix==ploidy] <- 2 } if (method == "MarkersMatrix") { Gmatrix <- !is.na(SNPmatrix) Gmatrix <- tcrossprod(Gmatrix, Gmatrix) return(Gmatrix) } ## VanRaden ## if (method == "VanRaden") { if(is.null(weights)){ if(ploidy==2 & ratio==FALSE){ TwoPQ <- 2 * t(Frequency[, 1]) %*% Frequency[, 2] SNPmatrix <- SNPmatrix- 2 * FreqP SNPmatrix[is.na(SNPmatrix)] <- 0 Gmatrix <- (tcrossprod(SNPmatrix, SNPmatrix))/as.numeric(TwoPQ) }else{ if(ploidy.correction){ if(ratio==FALSE){ Frequency <- apply(X=SNPmatrix,FUN=mean,MARGIN=2,na.rm=TRUE)/ploidy K <- 
sum(ploidy * Frequency * (1-Frequency))
        }else{
          Frequency <- apply(X=SNPmatrix,FUN=mean,MARGIN=2,na.rm=TRUE)
          K <- sum(1/ploidy * Frequency * (1-Frequency))
        }
      }
      SNPmatrix <- scale(SNPmatrix,center=TRUE,scale=FALSE)
      if(!ploidy.correction){
        K <- sum(apply(X=SNPmatrix,FUN=var,MARGIN=2,na.rm=TRUE))
      }
      SNPmatrix[which(is.na(SNPmatrix))] <- 0
      Gmatrix <- tcrossprod(SNPmatrix)/K
    }
  }else{
    weights = weights[match(colnames(SNPmatrix),markers)]
    if(ploidy==2 & ratio==FALSE){
      TwoPQ <- 2 * t(Frequency[, 1]) %*% Frequency[, 2]
      SNPmatrix <- SNPmatrix - 2 * FreqP
      SNPmatrix[is.na(SNPmatrix)] <- 0
      Gmatrix <- tcrossprod(tcrossprod(SNPmatrix, diag(weights)), SNPmatrix)/as.numeric(TwoPQ)
    }else{
      if(ploidy.correction){
        if(ratio==FALSE){
          Frequency <- apply(X=SNPmatrix,FUN=mean,MARGIN=2,na.rm=TRUE)/ploidy
          K <- sum(ploidy * Frequency * (1-Frequency))
        }else{
          # 1/ploidy factor added to match the documented ratio correction and the unweighted branch
          K <- sum(1/ploidy * apply(X=SNPmatrix,FUN=mean,MARGIN=2,na.rm=TRUE) * (1-apply(X=SNPmatrix,FUN=mean,MARGIN=2,na.rm=TRUE)))
        }
      }
      SNPmatrix <- scale(SNPmatrix,center=TRUE,scale=FALSE)
      if(!ploidy.correction){
        K <- sum(apply(X=SNPmatrix,FUN=var,MARGIN=2,na.rm=TRUE))
      }
      SNPmatrix[which(is.na(SNPmatrix))] <- 0
      Gmatrix <- tcrossprod(tcrossprod(SNPmatrix, diag(weights)), SNPmatrix)/K
    }
  }
}

if (method == "Yang") {
  FreqPQ <- matrix(rep(2 * Frequency[, 1] * Frequency[, 2], each = nrow(SNPmatrix)), ncol = ncol(SNPmatrix))
  G.all <- (SNPmatrix^2 - (1 + 2 * FreqP) * SNPmatrix + 2 * (FreqP^2))/FreqPQ
  G.ii <- as.matrix(colSums(t(G.all), na.rm = T))
  SNPmatrix <- (SNPmatrix - (2 * FreqP))/sqrt(FreqPQ)
  G.ii.hat <- 1 + (G.ii)/NumberMarkers
  SNPmatrix[is.na(SNPmatrix)] <- 0
  Gmatrix <- (tcrossprod(SNPmatrix, SNPmatrix))/NumberMarkers
  diag(Gmatrix) <- G.ii.hat
}

if (method == "Su"){
  TwoPQ <- 2*(FreqP)*(1-FreqP)
  SNPmatrix[SNPmatrix==2 | SNPmatrix==0] <- 0
  SNPmatrix <- SNPmatrix - TwoPQ
  SNPmatrix[is.na(SNPmatrix)] <- 0
  Gmatrix <- tcrossprod(SNPmatrix,SNPmatrix)/ sum(TwoPQ[1,]*(1-TwoPQ[1,]))
}

if (method == "Vitezica"){
  TwoPQ <- 2*(FreqP[1,])*(1-FreqP[1,])
  SNPmatrix[is.na(SNPmatrix)] <- 0
  SNPmatrix <- (SNPmatrix==0)*-2*(FreqP^2) + (SNPmatrix==1)*2*(FreqP)*(1-FreqP) + (SNPmatrix==2)*-2*((1-FreqP)^2)
  Gmatrix <- tcrossprod(SNPmatrix,SNPmatrix)/sum(TwoPQ^2)
}

if (method == "Slater"){
  drop.alleles <- which(Frequency==0)
  if(length(drop.alleles)>0){
    Frequency <- Frequency[-drop.alleles]
    SNPmatrix <- SNPmatrix[,-drop.alleles]
    FreqP <- FreqP[,-drop.alleles]
  }
  FreqPQ <- matrix(rep(Frequency * (1-Frequency), each = nrow(SNPmatrix)), ncol = ncol(SNPmatrix))
  SNPmatrix[which(is.na(SNPmatrix))] <- 0
  G.ii <- (SNPmatrix^2 - (2 * FreqP) * SNPmatrix + FreqP^2)/FreqPQ
  G.ii <- as.matrix(colSums(t(G.ii), na.rm = T))
  G.ii <- 1 + (G.ii)/NumberMarkers
  SNPmatrix <- (SNPmatrix - (FreqP))/sqrt(FreqPQ)
  SNPmatrix[is.na(SNPmatrix)] <- 0
  Gmatrix <- (tcrossprod(SNPmatrix, SNPmatrix))/NumberMarkers
  diag(Gmatrix) <- G.ii
}

if( method == "Endelman" ){
  if( ploidy != 4 ){
    stop( "'Endelman' method is only implemented for ploidy=4" )
  }
  Frequency <- colSums(SNPmatrix)/(nrow(SNPmatrix)*ploidy)
  Frequency <- cbind(Frequency,1-Frequency)
  SixPQ <- 6 * t((Frequency[, 1]^2)) %*% (Frequency[, 2]^2)
  SNPmatrix <- 6 * t((Frequency[, 1]^2)%*%t(rep(1,nrow(SNPmatrix)))) -
    3*t((Frequency[, 1])%*%t(rep(1,nrow(SNPmatrix))))*SNPmatrix +
    0.5 * SNPmatrix*(SNPmatrix-1)
  Gmatrix <- (tcrossprod(SNPmatrix, SNPmatrix))/as.numeric(SixPQ)
}

if (verify.posdef) {
  e.values <- eigen(Gmatrix, symmetric = TRUE)$values
  indicator <- length(which(e.values <= 0))
  if (indicator > 0)
    cat("\t Matrix is NOT positive definite. It has ", indicator, " eigenvalues <= 0 \n \n")
}

if(ASV){
  Gmatrix = get_ASV(Gmatrix)
}

Time = as.matrix(proc.time() - Time)
cat("Completed! Time =", Time[3], " seconds \n")
gc()
return(Gmatrix)
}

## Internal Functions ##
get_ASV = function(x){
  return( x / ( sum(diag(x)) / (nrow(x) - 1)) )
}

# Coding SNPmatrix as in Slater (2016), full-autopolyploid model including
# non-additive effects (Presence/Absence per Genotype per Marker)
slater_par <- function(X,ploidy){
  prime.index <- c(3,5,7,11,13,17,19,23,29,31,37,
                   41,43,47,53,59,61,67,71,73,79)
  NumberMarkers <- ncol(X)
  nindTotal <- nrow(X)
  X <- X+1
  ## Breaking into intervals to use less RAM
  temp <- seq(1,NumberMarkers,10000)
  temp <- cbind(temp,temp+9999)
  temp[length(temp)] <- NumberMarkers
  prime.index <- prime.index[1:(ploidy+1)]
  ## Uses Diagonal (which is sparse mode, uses less memory)
  for(i in 1:nrow(temp)){
    X.temp <- X[,c(temp[i,1]:temp[i,2])]
    NumberMarkers <- ncol(X.temp)
    X.temp <- X.temp %*% t(kronecker(diag(NumberMarkers),prime.index))
    X.temp[which(as.vector(X.temp) %in% c(prime.index*c(1:(ploidy+1))))] <- 1
    X.temp[X.temp!=1] <- 0
    if(i==1){
      X_out <- X.temp
    }else{
      X_out <- cbind(X_out,X.temp)
    }
  }
  gc()
  return(X_out)
}

# Internal function to check input Gmatrix arguments
check_Gmatrix_data <- function(SNPmatrix,ploidy,method, ratio=FALSE, integer=TRUE){
  if (is.null(SNPmatrix)) {
    stop(deparse("Please define the variable SNPmatrix"))
  }
  if (all(method != c("Yang", "VanRaden", "Slater", "Su", "Vitezica", "MarkersMatrix","Endelman"))) {
    stop("Method to build Gmatrix has to be either `Yang` or `VanRaden` for a marker-based additive relationship matrix, or `Su` or `Vitezica` or `Endelman` for a marker-based dominance relationship matrix, or `MarkersMatrix` for a matrix with the number of markers shared by pairs of individuals")
  }
  # if( method=="Yang" && ploidy>2)
  #   stop("Change method to 'VanRaden' for ploidies higher than 2 for marker-based additive relationship matrix")
  if( method=="Su" && ploidy>2)
    stop("Change method to 'Slater' for ploidies higher than 2 for marker-based non-additive relationship matrix")
  if( method=="Vitezica" && ploidy>2)
    stop("Change method to 'Slater' for ploidies higher than 2 for marker-based non-additive relationship matrix")
  if(!is.matrix(SNPmatrix)){
    cat("SNPmatrix class is:",class(SNPmatrix),"\n")
    stop("SNPmatrix class must be matrix. Please verify it.")
  }
  if(!ratio){
    if( ploidy > 20 | (ploidy %% 2) != 0)
      stop(deparse("Only even ploidy from 2 to 20"))
    t <- max(SNPmatrix,na.rm = TRUE)
    if( t > ploidy )
      stop(deparse("Check your data, it has values above ploidy number"))
    t <- min(SNPmatrix,na.rm=TRUE)
    if( t < 0 )
      stop(deparse("Check your data, it has values under 0"))
    if(integer)
      if(prod(SNPmatrix == round(SNPmatrix),na.rm = TRUE)==0)
        stop(deparse("Check your data, it has non-integer values"))
  }
  if(ratio){
    t <- max(SNPmatrix,na.rm = TRUE)
    if( t > 1)
      stop(deparse("Check your data, it has values above 1. Ratio values in [0;1] are expected."))
    t <- min(SNPmatrix,na.rm=TRUE)
    if( t < 0 )
      stop(deparse("Check your data, it has values under 0. Ratio values in [0;1] are expected."))
  }
}
/scratch/gouwar.j/cran-all/cranData/AGHmatrix/R/Gmatrix.R
########################################
#
# Package: AGHmatrix
#
# File: Hmatrix.R
# Contains: Hmatrix
#
# Written by Rodrigo Rampazo Amadeu
#
# First version: Feb-2014
# Last update: 09-Jul-2019
# License: GPL-3
#
#######################################

#' Construction of Combined Relationship Matrix H
#'
#' Given a matrix A and a matrix G, returns an H matrix. The H matrix is the relationship matrix built from combined pedigree and genomic information. First, you need to compute the A and G matrices separately and then use them as input to build the combined H matrix.
#' Two methods are implemented: `Munoz` shrinks the G matrix towards the A matrix, scaling the molecular relatedness within each relationship class;
#' `Martini` is a modified version of Legarra et al. (2009) that combines the A and G matrices using scaling factors. When method is `Martini` with `tau=1` and `omega=1`, you obtain the same H matrix as in Legarra et al. (2009).
#'
#' @param A A matrix from function Amatrix
#' @param G G matrix from function Gmatrix
#' @param markers the marker matrix used to generate the G matrix
#' @param c constant value used in the H computation, default: c=0
#' @param method "Martini" or "Munoz", default="Martini"
#' @param missingValue missing value in data, default=-9.
#' @param maf minimum allele frequency accepted for each marker, default=0.
#' @param ploidy data ploidy (an even number between 2 and 20), default=2.
#' @param tau to be used for Martini's method, default=1.
#' @param omega to be used for Martini's method, default=1.
#' @param roundVar only used for Munoz's method: how many digits to consider when grouping relationships into the same class, default=3.
#' @param ASV if TRUE, transforms the matrix into its average semivariance (ASV) equivalent (K = K / (trace(K) / (nrow(K)-1))). Details in formula 2 of Feldmann et al. (2022). Default = FALSE.
#'
#' @return H matrix with the relationships between the individuals based on pedigree and corrected by molecular information
#'
#' @examples
#' \dontrun{
#' data(ped.sol)
#' data(snp.sol)
#' #Computing the numerator relationship matrix with 10% of double-reduction
#' Amat <- Amatrix(ped.sol, ploidy=4, w = 0.1)
#' #Computing the additive relationship matrix based on VanRaden (modified)
#' Gmat <- Gmatrix(snp.sol, ploidy=4,
#'                 maf=0.05, method="VanRaden")
#' Gmat <- round(Gmat,3) #to make it easier to invert
#'
#' #Computing H matrix (Martini)
#' Hmat_Martini <- Hmatrix(A=Amat, G=Gmat, method="Martini",
#'                         ploidy=4,
#'                         maf=0.05)
#'
#' #Computing H matrix (Munoz)
#' Hmat_Munoz <- Hmatrix(A=Amat, G=Gmat, markers = snp.sol,
#'                       ploidy=4, method="Munoz",
#'                       roundVar=2,
#'                       maf=0.05)
#' }
#'
#' @author Rodrigo R Amadeu, \email{rramadeu@@gmail.com}
#'
#' @references \emph{Feldmann MJ, et al. 2022. Average semivariance directly yields accurate estimates of the genomic variance in complex trait analyses. G3 (Bethesda), 12(6).}
#' @references \emph{Munoz, PR. 2014 Unraveling additive from nonadditive effects using genomic relationship matrices. Genetics 198, 1759-1768}
#' @references \emph{Martini, JW, et al. 2018 The effect of the H-1 scaling factors tau and omega on the structure of H in the single-step procedure. Genetics Selection Evolution 50(1), 16}
#' @references \emph{Legarra, A, et al. 2009 A relationship matrix including full pedigree and genomic information. Journal of Dairy Science 92, 4656–4663}
#' @export
Hmatrix <- function(A=NULL, G=NULL, markers=NULL,
                    c=0, method="Martini",
                    tau=1, omega=1,
                    missingValue=-9, maf=0, ploidy=2,
                    roundVar=3,
                    ASV=FALSE
                    ){
  Aorig <- A
  Gorig <- G
  Time = proc.time()
  cat("Comparing the matrices... \n")
  An <- rownames(Aorig)
  Gn <- rownames(Gorig)
  missingGmatrix <- which(is.na(match(An,Gn)))
  missingAmatrix <- which(is.na(match(Gn,An)))
  if(length(missingAmatrix)>0){
    Gnhat <- Gn[-missingAmatrix]
  }else{
    Gnhat <- Gn
  }
  if(length(missingGmatrix)>0){
    Anhat <- An[-missingGmatrix]
  }else{
    Anhat <- An
  }
  A <- Aorig#[Anhat,Anhat]
  G <- Gorig[Gnhat,Gnhat]
  missingGmatrix <- An[missingGmatrix]
  missingAmatrix <- Gn[missingAmatrix]
  Time = as.matrix(proc.time()-Time)
  cat("Completed! Time =", Time[3]/60," minutes \n")
  cat("Computing the H matrix... \n")
  Time = proc.time()

  if(method=="Martini"){
    idA <- rownames(A)
    idG <- rownames(G)
    idH <- unique(c(idG,idA))
    idH <- rev(idH)
    A <- A[idH,idH]
    index = is.na(match(idH,idG))
    A11 <- A[index,index]
    A12 <- A[index,!index]
    A21 <- A[!index,index]
    A22 <- A[!index,!index]
    G22 <- G[idH[!index],idH[!index]]
    #if(is.singular.matrix(G22))
    #  stop(deparse("Matrix G22 is singular (not invertible)"))
    A22inv = solve(A22) #A is always invertible
    G22inv = try(solve(G22),silent=TRUE)
    if(inherits(G22inv,"try-error")){
      cat(G22inv)
      stop("G22 not inverting with solve(), try a different/modified G matrix")
    }
    H22 = solve((tau*G22inv+(1-omega)*A22inv))
    H11 = A12 %*% A22inv %*% (H22-A22) %*% A22inv %*% A21
    H12 = A12 %*% A22inv %*% (H22-A22)
    H21 = (H22-A22) %*% A22inv%*%A21
    H22 = (H22-A22)
    H = A+cbind(rbind(H11,H21),rbind(H12,H22))
    if (ASV) {
      H = get_ASV(H)
    }
    Time = as.matrix(proc.time()-Time)
    cat("\n","Completed! Time =", Time[3]/60," minutes \n")
    return(H)
  }

  if(method=="Munoz"){
    A <- Aorig[Anhat,Anhat]
    if(is.null(markers))
      stop("Aborting: For the Munoz method you need to specify the markers object")
    markersmatrix <- Gmatrix(markers,method="MarkersMatrix",ploidy=ploidy,missingValue=missingValue,maf=maf)
    #Computing the variance of G by A classes (A rounded by roundVar)
    classes <- as.numeric(levels(as.factor(A)))
    classes <- unique(round(classes,roundVar))
    n <- length(classes)
    varA <- meanG <- matrix(NA,nrow=nrow(A),ncol=nrow(A))
    varAclasses <- c()
    for(i in 1:n){
      varA[round(A,roundVar)==classes[i]] <- varAclasses[i] <- var(G[round(A,roundVar)==classes[i]])
      meanG[round(A,roundVar)==classes[i]] <- mean(G[round(A,roundVar)==classes[i]])
    }
    varAclasses[varAclasses==0] = NA
    varAclasses[is.infinite(varAclasses)] <- NA
    tmp <- which(is.na(varAclasses))
    for(i in 1:length(tmp)){
      varAclasses[tmp[i]] <- zoo::na.approx(varAclasses)[tmp[i]]
      varA[round(A,roundVar)==classes[tmp[i]]] <- varAclasses[tmp[i]]
    }
    #Computing beta and H
    beta <- 1 - (c+(1/(markersmatrix[Gnhat,Gnhat]))/varA)
    H <- beta*(G-A)+A
    ######
    Aorig[Anhat,Anhat] = H
    if (ASV) {
      Aorig = get_ASV(Aorig)
    }
    Time = as.matrix(proc.time()-Time)
    cat("\n","Completed! Time =", Time[3]/60," minutes \n")
    cat("\n","Returning H = A matrix corrected by G... \n")
    return(Aorig)
  }
}
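## Editor's illustrative sketch (not part of the package): the Martini et al.
## (2018) blending step used above, on hypothetical 2x2 genotyped blocks
## (A22t, G22t). With tau=1 and omega=1 it collapses to H22 = G22, matching
## the Legarra et al. (2009) case. Wrapped in if (FALSE) so it never runs.
if (FALSE) {
  A22t <- matrix(c(1.00, 0.25, 0.25, 1.00), 2)
  G22t <- matrix(c(1.10, 0.30, 0.30, 0.95), 2)
  tau <- 1; omega <- 1
  H22t <- solve(tau * solve(G22t) + (1 - omega) * solve(A22t))
  all.equal(H22t, G22t)  # TRUE: tau=omega=1 reproduces Legarra et al. (2009)
}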
/scratch/gouwar.j/cran-all/cranData/AGHmatrix/R/Hmatrix.R
#########################################
#
# Package: AGHmatrix
#
# File: Mcheck.R
# Contains: Mcheck
#
# Written by Luis F V Ferrao and Rodrigo Amadeu
#
# First version: Feb-2014
# Last update: 30-Mar-2023
# License: GPL-3
#
#########################################

#' Check and filter markers
#'
#' This function applies different filters to the marker matrix
#'
#' @param SNPmatrix matrix (n x m), where n is the number of individuals (row names) and m is the number of markers (column names), coded inside the matrix as 0, 1, 2, ..., ploidy, and missingValue.
#' @param ploidy data ploidy (an even number between 2 and 20). Default=2.
#' @param missingValue missing value in data. Default=-9.
#' @param thresh.missing threshold on missing data: SNPs with a missing-data frequency at or below this value are kept; if equal to 1, no threshold or imputation is applied. Default = 0.90.
#' @param thresh.maf minimum allele frequency accepted for each marker. Default=0.05.
#' @param thresh.htzy heterozygosity threshold: SNPs with heterozygosity below this threshold are removed. Default=0.
#' @param impute.method "mean" to impute the missing data by the mean per marker, "mode" to impute the missing data by the mode per marker, "global.mean" to impute the missing data by the mean across all markers, "global.mode" to impute the missing data by the mode across all markers. Default = "mean".
#' @param rmv.mono if monomorphic markers should be removed. Default=TRUE.
#'
#' @return SNPmatrix after the filtering steps.
#'
#' @examples
#' data(snp.pine)
#' M = Mcheck(snp.pine)
#'
#' @author Luis F V Ferrao and Rodrigo Amadeu, \email{rramadeu@@gmail.com}
#'
#' @export
#'
Mcheck = function(SNPmatrix = NULL,
                  ploidy=2,
                  missingValue = -9,
                  thresh.maf = 0.05,
                  thresh.missing = 0.9,
                  thresh.htzy = 0,
                  impute.method = "mean",
                  rmv.mono=TRUE){

  # SNP missing data
  ncol.init <- ncol(SNPmatrix)
  if (!is.na(missingValue)) {
    m <- match(SNPmatrix, missingValue, 0)
    SNPmatrix[m > 0] <- NA
  }

  missing <- apply(SNPmatrix, 2, function(x) sum(is.na(x))/nrow(SNPmatrix))
  missing.low = missing <= thresh.missing
  cat("\nMissing data check: \n")
  if(any(missing.low)){
    cat("\tTotal SNPs:", ncol(SNPmatrix),"\n")
    cat("\t",ncol(SNPmatrix) - sum(missing.low), "SNPs dropped due to missing data threshold of", thresh.missing,"\n")
    cat("\tTotal of:",sum(missing.low), " SNPs \n")
    idx.keep <- which(missing.low)
    SNPmatrix <- SNPmatrix[, idx.keep, drop=FALSE]
  } else{
    cat("\tNo SNPs with missing data, missing threshold of = ", thresh.missing,"\n")
  }

  # Minor allele frequency
  MAF <- apply(SNPmatrix, 2, function(x) {
    AF <- mean(x, na.rm = T)/ploidy
    MAF <- ifelse(AF > 0.5, 1 - AF, AF) # Minor allele freq can be the ref allele or not
  })
  snps.low <- MAF < thresh.maf
  cat("\nMAF check: \n")
  if(any(snps.low)){
    cat("\t",sum(snps.low), "SNPs dropped with MAF below", thresh.maf,"\n")
    cat("\tTotal:",ncol(SNPmatrix) - sum(snps.low), " SNPs \n")
    idx.rm <- which(snps.low)
    SNPmatrix <- SNPmatrix[, -idx.rm, drop=FALSE]
  } else{
    cat("\tNo SNPs with MAF below", thresh.maf,"\n")
  }

  # Monomorphic SNPs
  if(rmv.mono){
    mono = (apply(SNPmatrix, 2, var, na.rm=TRUE)==0)
    mono[is.na(mono)] = TRUE
    cat("\nMonomorphic check: \n")
    if(any(mono)){
      cat("\t",sum(mono), "monomorphic SNPs \n")
      cat("\tTotal:",ncol(SNPmatrix) - sum(mono), "SNPs \n")
      idx.rm <- which(mono)
      SNPmatrix <- SNPmatrix[, -idx.rm, drop=FALSE]
    } else{
      cat("\tNo monomorphic SNPs \n")
    }
  }

  # Imputation
  if(impute.method=="global.mean"){
    ix <- which(is.na(SNPmatrix))
    if (length(ix) > 0) {
      SNPmatrix[ix] <- mean(SNPmatrix,na.rm = TRUE)
    }
  }

  if(impute.method=="global.mode"){
    ix <- which(is.na(SNPmatrix))
    if (length(ix) > 0) {
      SNPmatrix[ix] <- as.integer(names(which.max(table(SNPmatrix))))
    }
  }

  if(impute.method=="mean"){
    imputvalue = apply(SNPmatrix,2,mean,na.rm=TRUE)
    ix = which(is.na(SNPmatrix),arr.ind=TRUE)
    SNPmatrix[ix] = imputvalue[ix[,2]]
  }

  if(impute.method=="mode"){
    imputvalue = apply(SNPmatrix, 2, function(x) as.integer(names(which.max(table(x)))))
    ix = which(is.na(SNPmatrix),arr.ind=TRUE)
    SNPmatrix[ix] = imputvalue[ix[,2]]
  }

  # Heterozygosity
  htrz <- apply(SNPmatrix, 2, function(x) sum( x!= 0 & x != ploidy,na.rm=T)/nrow(SNPmatrix))
  htrz.low = htrz < thresh.htzy
  cat("\nHeterozygosity data check: \n")
  if(any(htrz.low)){
    cat("\tTotal SNPs:", ncol(SNPmatrix),"\n")
    cat("\t",sum(htrz.low), "SNPs dropped due to heterozygosity threshold of", thresh.htzy,"\n")
    cat("\tTotal of:",ncol(SNPmatrix) - sum(htrz.low), " SNPs \n")
    idx.rm <- which(htrz.low)
    SNPmatrix <- SNPmatrix[, -idx.rm, drop=FALSE] # drop (not keep) the low-heterozygosity SNPs
  } else{
    cat("\tNo SNPs below the heterozygosity threshold of", thresh.htzy,"\n")
  }

  # Total of SNPs
  cat("\nSummary check: \n")
  cat("\tInitial: ", ncol.init, "SNPs \n")
  cat("\tFinal: ", ncol(SNPmatrix), " SNPs (", ncol.init - ncol(SNPmatrix), " SNPs removed) \n \n")

  return(SNPmatrix)
}
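## Editor's illustrative sketch (not part of the package): Mcheck() on a toy
## diploid matrix with one missing value and one monomorphic marker. Mtoy is
## a hypothetical name. Wrapped in if (FALSE) so it never runs when sourced.
if (FALSE) {
  Mtoy <- matrix(c( 0, 1, 2,
                    2, 1, 2,
                   -9, 0, 2),
                 nrow = 3, byrow = TRUE,
                 dimnames = list(paste0("ind", 1:3), paste0("m", 1:3)))
  ## m3 is monomorphic (MAF = 0) and is dropped; the -9 in m1 becomes NA and
  ## is then imputed by the marker mean (impute.method = "mean")
  Mcheck(Mtoy, ploidy = 2, missingValue = -9, thresh.maf = 0.05)
}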
/scratch/gouwar.j/cran-all/cranData/AGHmatrix/R/Mcheck.R
#########################################
#
# Package: AGHmatrix
#
# File: datatreat.R
# Contains: datatreat asciitonumber
#
# Written by Rodrigo Rampazo Amadeu
#
# First version: Feb-2014
# Last update: 24-Apr-2015
# License: GPL-3
#
#########################################

#' Organizes pedigree data in a chronological way
#'
#' This function organizes pedigree data in a chronological way and returns 3 lists: i) parental 1 values (numeric); ii) parental 2 values (numeric); iii) real names of the individuals. It can also save a .txt file with the new pedigree.
#' @param data name of the pedigree data frame. Default=NULL.
#' @param unk the code for missing data. Default=0.
#' @param n.max maximum number of iterations to get the chronological order. Default = 50
#' @param save if TRUE, saves the genealogy in a .txt file
#'
#' @return list with parental 1, parental 2, and real names of the individuals (key); also saves a .txt file with the new chronological pedigree.
#'
#' @examples
#' data(ped.mrode)
#' datatreat(ped.mrode)
#'
#' @author Rodrigo R Amadeu, \email{rramadeu@@gmail.com}
#'
#' @export

datatreat <- function(data=NULL,
                      n.max=50,
                      unk=0,
                      save=FALSE
                      ){
  indicator <- k <- 0
  if(is.null(data))
    stop(deparse("Select a data name"))

  for( i in 1:n.max){
    #Data Treatment
    if( i == 1){
      data <- as.matrix(data)
      k <- rep(NA,2) #only to avoid stopping the loop on the first pass
    }
    if( i > 1 )
      data <- new.data

    pedigree <- asciitonumber(data,unk=unk)
    ind.data <- pedigree$ind.data
    sire <- pedigree$sire
    dire <- pedigree$dire
    ind <- c(1:length(sire))
    right.pos <- rep(NA, length=length(sire))

    #Alternate between sire and dire at each iteration
    parent <- sire
    parent.ind <- "sire"
    if( indicator%%2 == 1 ){
      parent <- dire
      parent.ind <- "dire"
    }

    for ( j in 1:length(parent)){
      if( is.na(match(parent[j], ind[1:j])) && parent[j] != 0){
        right.pos[j] <- which(ind == parent[j])
      }
    }

    error <- c()
    for( j in 1:length(parent))
      if( parent[j] > j)
        error <- c(error,j)

    #Print the step point
    if( save ){
      cat( paste("iteration #",i,parent.ind,"\n",sep=""))
      cat( paste(error,"\n"))
    }

    #Right positions of the new data
    if( length(error) > 0 ){
      after <- which( !is.na(right.pos))
      before <- right.pos[after]
      for( j in 1:length(before)){
        if( j == 1 || before[j] != before[j-1] ){
          ind[after[j]] <- before[j]
          ind[before[j]] <- after[j]
        }
      }
    }

    new.data <- data[ind,]

    # Verify changes in the loop
    lastk <- k #indicates whether there were changes for the previous parent
    k <- length(error) #indicates whether there were changes in this pass
    if ( k == 0 )
      indicator = indicator+1

    if ( i != 1 && k == 0 && lastk == 0 ){
      cat("Your data was chronologically organized with success. \n")
      if(save){
        cat(paste("orgped.txt",sep=""))
        write.table(new.data, file=paste("orgped.txt",sep=""), quote=FALSE, row.names=FALSE, col.names=FALSE)
      }
      return(pedigree)
    }

    if ( i == n.max ){
      cat("Your data was not chronologically organized with success. Check your data with the missingdata() function and/or verify the individuals in the last 2 iterations described above (the number is the row in the file): \n")
      cat(paste("orgped.txt",sep=""))
      write.table(new.data, file=paste("orgped.txt",sep=""), quote=FALSE, row.names=FALSE, col.names=FALSE)
      return(pedigree)
    }
  }
}

# This function creates a list with numeric indices given pedigree data.
# It also checks whether every listed parent name appears earlier in the
# individual name column, i.e., whether the parent exists in the matrix.
asciitonumber <- function( pedigree.data, unk=0 ){ if( ncol(pedigree.data) != 3 ){ print("Data with more than 3 columns, please verify") return() } ind.data <- as.vector(c(unk,pedigree.data[,1])) sire.data <- as.vector(pedigree.data[,2]) dire.data <- as.vector(pedigree.data[,3]) sire <- match(sire.data, ind.data) dire <- match(dire.data, ind.data) ind <- as.vector(c(1:length(ind.data))) sire <- sire-1 dire <- dire-1 ind <- ind[-length(ind)] ind.data <- ind.data[-1] pedigree <- list(sire=sire,dire=dire,ind.data=pedigree.data[ind,1]) return(pedigree) } # This function organizes pedigree data in a chronological way and return 3 lists: # i) parental 1 values (numeric); ii) parental 2 values (numeric); iii) real names of # the individuals. Also save a .txt file with new pedigree file. sortped<-function(data = NULL, loop.in = 1000, loop.between = 100, print = FALSE) { if (is.null(data)) stop(deparse("Please define the variable data")) stop.loop.1 <- stop.loop.2 <- FALSE for(j in 1:loop.between){ if(print) cat(paste("looping between...",print(j))) for(i in 1:loop.in){ if(print) cat(paste("looping in first parent...",print(i))) ind<-data[,1] sire<-data[,2] dire<-data[,3] index<-1:length(ind) compare<-match(dire,ind) compare[which(is.na(compare))]<-0 loop<-which(compare>index) newindex<-index newindex[loop[1]]<-compare[loop[1]] newindex[compare[loop[1]]]<-loop[1] data<-data[newindex,] if(print) print(length(loop)) if( length(loop) == 0 && i == 1) stop.loop.1 <- TRUE if(length(loop)==0) break } for(i in 1:loop.in){ if(print) cat(paste("looping in second parent...",print(i))) ind<-data[,1] sire<-data[,2] dire<-data[,3] index<-1:length(ind) compare<-match(sire,ind) compare[which(is.na(compare))]<-0 loop<-which(compare>index) newindex<-index newindex[loop[1]]<-compare[loop[1]] newindex[compare[loop[1]]]<-loop[1] data<-data[newindex,] if(print) print(length(loop)) if( length(loop) == 0 && i == 1) stop.loop.2 <- TRUE if( length(loop) == 0 ) break } if( stop.loop.1 && stop.loop.2) break } return(data) } # This function verify which rows in a pedigree data has missing parental or conflictuos data verifyped <- function(pedigree, unk=0 ){ flag<-FALSE if( ncol(pedigree) != 3 ){ print("Data with more than 3 columns, please verify") flag<-TRUE return(flag) } if(length(unique(pedigree[,1]))<nrow(pedigree)){ print("Data with repeated entry, please verify the following entries lines") print(which(duplicated(pedigree[,1]),arr.ind=TRUE)) flag<-TRUE return(flag) } #Treating all as numeric ind.data <- as.vector(pedigree[,1]) sire.data <- as.vector(pedigree[,2]) dire.data <- as.vector(pedigree[,3]) sire <- match(sire.data, c(ind.data,"0")) dire <- match(dire.data, c(ind.data,"0")) ind <- as.vector(c(1:length(ind.data))) missing <- c() #Verify the individual w/ same name in sire/dire missing$conflict <- c(which(sire == ind),which( dire == ind)) if(length(missing$conflict)>0){ print("The following rows have the individual name equals to the parental name. Please verify.") print(pedigree[missing$conflict,]) flag<-TRUE } #Verify the missing sire (Parent 1) missing$sire.na <- c( which(is.na(sire))) if(length(missing$sire)>0){ print("The following rows have the parental 1 name (column 2) missing in the pedigree. Please verify.") print(pedigree[missing$sire,]) flag<-TRUE } #Verify the missing dire (Parent 2) missing$dire.na <- c( which(is.na(dire))) if(length(missing$dire)>0){ print("The following rows have the parental 2 name (column 3) missing in the pedigree. 
Please verify.") print(pedigree[missing$dire,]) flag<-TRUE } return(flag) }
/scratch/gouwar.j/cran-all/cranData/AGHmatrix/R/datatreat.R
######################################################################### # # Package: AGHmatrix # # File: expandAmatrix.R # Contains: expandAmatrix # # Written by Rodrigo Rampazo Amadeu # # First version: Oct-2021 # Last update: 03-Nov-2019 # License: GPL-3 # ######################################################################### #' Add new crosses to a current A matrix #' #' Expand a current A matrix with a new pedigree. The parents in the new pedigree should also be in the A matrix. #' #' @param newPedigree pedigree data name (3-column way format). Unknown value should be equal 0. #' @param A numerator relationship matrix output from Amatrix function. #' @param returnAll if TRUE returns old A with new A, if FALSE returns only new A #' #' @return Matrix with the Relationship between the individuals. #' #' @examples #' data(ped.sol) #' ped.initial = ped.sol[1:1120,] #' ped.new = ped.sol[-c(1:1120),] #' #Computing additive relationship matrix: #' A = Amatrix(ped.initial, ploidy=2) #' Anew = expandAmatrix(ped.new, A) #' #' #Comparing with one-step building.. #' Afull = Amatrix(ped.sol, ploidy=2) #' test = Anew-Afull #' which(test!=0) #' #' @author Rodrigo R Amadeu, \email{rramadeu@@gmail.com} #' #' @export expandAmatrix <- function(newPedigree = NULL, A=NULL, returnAll=TRUE){ if( is.null(newPedigree)) stop(deparse("newPedigree argument is missing")) if( is.null(A)) stop(deparse("A argument is missing")) if(any(is.na(match(newPedigree[,2],rownames(A))))+any(is.na(match(newPedigree[,3],rownames(A))))) stop(deparse("There are individuals in the new pedigree with missing parents in A")) ## Creating a line of 0s A = rbind(A,rep(0,nrow(A))) A = cbind(A,c(rep(0,ncol(A)),1)) colnames(A)[ncol(A)] = rownames(A)[ncol(A)] = "0" NewA = A[match(newPedigree[,2],rownames(A)),match(newPedigree[,2],rownames(A))] + A[match(newPedigree[,2],rownames(A)),match(newPedigree[,3],rownames(A))] + A[match(newPedigree[,3],rownames(A)),match(newPedigree[,2],rownames(A))] + A[match(newPedigree[,3],rownames(A)),match(newPedigree[,3],rownames(A))] NewA = NewA/4 diag(NewA) = 1+diag(A[match(newPedigree[,2],rownames(A)),match(newPedigree[,3],rownames(A))])/2 if(!returnAll){ rownames(NewA)=colnames(NewA)=newPedigree[,1] return(NewA) }else{ NewOldA = A[match(newPedigree[,2],rownames(A)),] + A[match(newPedigree[,3],rownames(A)),] NewOldA = NewOldA/2 NewOldA = NewOldA[,-ncol(NewOldA)] NewOldA = cbind(rbind(A[-ncol(A),-ncol(A)],NewOldA), rbind(t(NewOldA),NewA)) return(NewOldA) } }
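## Editor's illustrative sketch (not part of the package): the recurrence used
## above. For each new cross, the relationship between two new individuals is
## the average of the four parental relationships, and the diagonal is
## 1 + A[parent1, parent2]/2. Atoy and newped are hypothetical names.
## Wrapped in if (FALSE) so it never runs when the file is sourced.
if (FALSE) {
  Atoy <- diag(2)
  dimnames(Atoy) <- list(c("P1", "P2"), c("P1", "P2"))
  newped <- data.frame(id = c("F1", "F2"), p1 = c("P1", "P1"), p2 = c("P2", "P2"))
  ## Two full sibs from unrelated, non-inbred parents:
  ## off-diagonal = 0.5 and diagonal = 1
  expandAmatrix(newped, Atoy, returnAll = FALSE)
}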
/scratch/gouwar.j/cran-all/cranData/AGHmatrix/R/expandAmatrix.R
######################################### # # Package: AGHmatrix # # File: filterpedigree.R # Contains: filterpedigree # # Written by Rodrigo Rampazo Amadeu # # First version: Apr-2021 # Last update: 09-Apr-2021 # License: GPL-3 # ######################################### #' Filter the pedigree to keep only the genealogy of a subset of individuals #' #' Filter the pedigree to keep only the genealogy of a subset of individuals #' @param inds vector with strings of individuals to keep their genealogy in the matrix #' @param data name of the pedigree data frame. Default=NULL. #' #' @return a data frame with pedigree containing the genealogy of the selected individuals #' #' @examples #' data(ped.sol) #' new.ped.sol = filterpedigree(inds = c("MSW168-2","W14090-3","W14090-4"),data=ped.sol) #' #' @author Rodrigo R Amadeu, \email{rramadeu@@gmail.com} #' #' @export filterpedigree <- function(inds, data){ output <- NULL progress <- round(length(inds)/10) perc <- 10 for(i in 1:length(inds)){ if(length(inds)>100){ if(i %% progress ==0){ cat(paste0(perc,"% \n")) perc=perc+10 } } ped_out<- data[which(data[,1] == inds[i]),] if(nrow(ped_out)==0){ stop(deparse(paste(inds[i],"doesn't exist in this pedigree."))) } trigger <- 1 while(trigger>0){ init <- nrow(ped_out) ped_in <- data[which(data[,1] %in% c(ped_out[,2],ped_out[,3])),] ped_out <- unique(rbind(ped_in, ped_out)) trigger <- nrow(ped_out)-init } output = rbind(output,ped_out) output = unique(output) } return(output) }
/scratch/gouwar.j/cran-all/cranData/AGHmatrix/R/filterpedigree.R
#########################################################################
#
# Package: AGHmatrix
#
# File: formatmatrix.R
# Contains: formatmatrix
#
# Written by Rodrigo Rampazo Amadeu
#
# First version: Feb-2014
# Last update: 20-May-2015
# License: GPL-3
#
#######################################

#' Transform a matrix into 3 columns
#'
#' Given any square matrix, transforms it into a 3-column format (row, column, value), mainly to be used for processing the data in other software (e.g., standalone ASReml)
#'
#' @param data matrix (nxn).
#' @param save if TRUE, saves the output in a file. Default=TRUE.
#' @param return if TRUE, returns the output as an object. Default=FALSE.
#' @param name name of the csv file to be saved. Default=data name.
#' @param round.by number of decimal digits to round the values to. Default = 12
#' @param exclude.0 if TRUE, removes all rows equal to zero (ASReml option). Default = TRUE
#'
#' @return an object and/or a csv file with a 3-column table representing the matrix.
#'
#' @examples
#' #Example with random matrix
#' data<-matrix(c(1,0.1,0,0.1,1,0,0,0,1.1),3)
#' formatmatrix(data=data,save=FALSE,return=TRUE,exclude.0=TRUE)
#'
#' #Example with pedigree matrix
#' #Reading the example data
#' data(ped.mrode)
#' #Making Relationship Matrix
#' Amrode<-Amatrix(ped.mrode)
#' #Inverting the Matrix
#' Amrode.inv<-solve(Amrode)
#' #Making the 3 columns format
#' Amrode.inv.ASREML<-formatmatrix(Amrode.inv,save=FALSE,return=TRUE,exclude.0=TRUE)
#' #Printing it
#' Amrode.inv.ASREML
#'
#' @author Rodrigo R Amadeu, \email{rramadeu@@gmail.com}
#'
#' @export
formatmatrix <- function( data = NULL,
                          save = TRUE,
                          return = FALSE,
                          name = deparse(substitute(data)),
                          round.by = 12,
                          exclude.0 = TRUE) {
  if( is.null(data))
    stop(deparse("Define a matrix data"))
  cat("Converting to column format... \n")
  Time <- proc.time()
  n <- nrow(data)
  first <- second <- third <- c()
  for( i in 1:n ){
    first <- c(first,rep(i,i))
    second <- c(second,c(1:i))
  }
  third <- data[upper.tri(data,diag=TRUE)]
  third <- round(third,round.by)
  columns <- cbind(first, second, third)
  if(exclude.0)
    columns <- columns[third!=0,]
  if(save){
    write.table(columns,file=paste(name,".csv",sep=""),sep=" ",row.names=FALSE,col.names=FALSE)
    cat(paste("Saved as ",name,".csv"," \n",sep=""))
  }
  Time = as.matrix(proc.time()-Time)
  cat("Completed! Time =", Time[3]/60," minutes \n")
  if(return)
    return( columns )
}
/scratch/gouwar.j/cran-all/cranData/AGHmatrix/R/formatmatrix.R
#########################################
#
# Package: AGHmatrix
#
# File: missingdata.R
# Contains: missingdata
#
# Written by Rodrigo Rampazo Amadeu
#
# First version: Feb-2014
# Last update: 14-Apr-2015
# License: GPL-3
#
#########################################

#' Survey of missing data
#'
#' This function verifies which rows in a pedigree dataset have missing parental information or conflicting data
#'
#' @param data data name from a pedigree list
#' @param unk unknown value of your data
#'
#' @return list with $conflict: rows of the data in which at least one parental name equals the individual name; $missing.sire: rows of the data with missing sire (Parental 1) information; $missing.dire: same as above for dire (Parental 2); $summary.missing: summary of the missing data, with 2 columns, the 1st with the name of the parent listed, the 2nd with how many times it appears in the data.
#'
#' @examples
#' data(ped.mrode)
#' missingdata(ped.mrode)
#'
#' @author Rodrigo R Amadeu, \email{rramadeu@@gmail.com}
#'
#' @export

missingdata <- function(data, unk=0 ){
  pedigree.data <- data
  data <- c()
  if( ncol(pedigree.data) != 3 ){
    print("Data with more than 3 columns, please verify")
    return()
  }

  #Treating all as numeric
  ind.data <- as.vector(c(unk,as.character(pedigree.data[,1])))
  sire.data <- as.vector(pedigree.data[,2])
  dire.data <- as.vector(pedigree.data[,3])
  sire <- match(sire.data, ind.data)
  dire <- match(dire.data, ind.data)
  ind <- as.vector(c(1:length(ind.data)))
  missing <- c()

  #Verify individuals w/ the same name in sire/dire
  missing$conflict <- c(which( sire == ind[-1]),which( dire == ind[-1] ))

  #Verify the missing sire (Parent 1)
  missing$sire.na <- c( which(is.na(sire)))

  #Verify the missing dire (Parent 2)
  missing$dire.na <- c( which(is.na(dire)))

  #Making a summary of the missing data
  missing$sire <- as.matrix(summary(as.factor(pedigree.data[which(is.na(sire)),2])))
  missing$dire <- as.matrix(summary(as.factor(pedigree.data[which(is.na(dire)),3])))
  names <- unique(c(rownames(missing$sire),rownames(missing$dire)))
  pos.sire <- match(rownames(missing$sire),names)
  pos.dire <- match(rownames(missing$dire),names)
  missing$parent <- rep(0,length(names))
  missing$parent[pos.dire] <- missing$dire
  missing$parent[pos.sire] <- missing$parent[pos.sire]+missing$sire
  missing$parent <- as.matrix(missing$parent)
  rownames(missing$parent) <- names

  #Final list
  missing <- list(conflict=missing$conflict,missing.sire=missing$sire.na,missing.dire=missing$dire.na,summary.missing=missing$parent)

  ## Improve this part :)
  #if(molecular){
  #  if( csv )
  #    data <- read.csv("molecular_diploid.csv")
  #  mol.data <- data[,-1]
  #  row.names(mol.data) <- data[,1]
  #  mol.data <- replace(mol.data, mol.data == unk, NA)
  #  missing.per.ind <- apply(mol.data,1, function(x) sum(is.na(x)))# / ncol(example) * 100
  #  missing.per.marker <- apply(t(mol.data),1,function(x) sum(is.na(x)))
  #  summary(missing.per.ind)/length(missing.per.marker)
  #  summary(missing.per.marker)/length(missing.per.ind)
  #}

  return(missing)
}
/scratch/gouwar.j/cran-all/cranData/AGHmatrix/R/missingdata.R
#' Pedigree Data #' #' Data from pedigree example proposed by Mrode 2005 #' #' @docType data #' #' @usage data(ped.mrode) #' #' @format table #' #' @keywords datasets #' #' #' @references R. A. Mrode, R. Thompson. Linear Models for the Prediction of Animal Breeding Values. CABI, 2005. #' #' @examples data(ped.mrode) #' #' @name ped.mrode NULL
/scratch/gouwar.j/cran-all/cranData/AGHmatrix/R/ped.mrode-data.R
#' Pedigree data for autopolyploid examples
#'
#' Dataset extracted from the supplementary material of Endelman et al. (2018).
#' Pedigree data frame of a potato population, with missing data coded as 0.
#'
#' @docType data
#'
#' @usage data(ped.sol)
#'
#' @format data.frame
#'
#' @keywords datasets
#'
#' @references Endelman, JB, et al., 2018 Genetic variance partitioning and genome-wide prediction with allele dosage information in autotetraploid potato. Genetics, 209(1) pp. 77-87.
#'
#' @examples data(ped.sol)
#'
#' @name ped.sol
NULL
/scratch/gouwar.j/cran-all/cranData/AGHmatrix/R/ped.sol-data.R
#' Molecular data for diploid examples
#'
#' Dataset extracted from the supplementary material of Resende et al. (2012).
#' SNP marker matrix from a pine population, coded as 0, 1, and 2, with missing values as -9.
#'
#' @docType data
#'
#' @usage data(snp.pine)
#'
#' @format matrix
#'
#' @keywords datasets
#'
#' @references Resende, MF, et al., 2012 Accuracy of genomic selection methods in a standard data set of loblolly pine (Pinus taeda l.). Genetics 190: 1503–1510.
#'
#' @examples data(snp.pine)
#'
#' @name snp.pine
NULL
/scratch/gouwar.j/cran-all/cranData/AGHmatrix/R/snp.pine-data.R
#' Molecular data for autopolyploid examples
#'
#' Dataset extracted from the supplementary material of Endelman et al. (2018).
#' SNP marker matrix from a potato population, coded as 0,1,2,3,4, with missing values as -9.
#'
#' @docType data
#'
#' @usage data(snp.sol)
#'
#' @format data.frame
#'
#' @keywords datasets
#'
#' @references Endelman, JB, et al., 2018 Genetic variance partitioning and genome-wide prediction with allele dosage information in autotetraploid potato. Genetics, 209(1) pp. 77-87.
#'
#' @examples data(snp.sol)
#'
#' @name snp.sol
NULL
/scratch/gouwar.j/cran-all/cranData/AGHmatrix/R/snp.sol-data.R
## ----knitr_init, echo=FALSE, cache=FALSE-------------------------------------- library(knitr) library(rmarkdown) knitr::opts_chunk$set(collapse = TRUE, comment = "#>", fig.width = 7, fig.height = 8, fig.align = "center", dev = "png", dpi = 72, cache = TRUE) ## ---- echo=FALSE, results='hide'---------------------------------------------- library(AGHmatrix) ## ---- eval=FALSE-------------------------------------------------------------- # ## Install stable version # install.packages("AGHmatrix") # # ## Install development version # install.packages("devtools") # devtools::install_github("rramadeu/AGHmatrix") # # ## Load # library(AGHmatrix) ## ----------------------------------------------------------------------------- data(ped.mrode) ped.mrode str(ped.mrode) #check the structure ## ---- eval=FALSE-------------------------------------------------------------- # #Computing additive relationship matrix for diploids (Henderson 1976): # Amatrix(ped.mrode, ploidy=2) # # #Computing dominant relationship matrix for diploids (Cockerham 1954): # Amatrix(ped.mrode, ploidy=2, dominance=TRUE) # # #Computing additive relationship matrix for autotetraploids (Kerr 2012): # Amatrix(ped.mrode, ploidy=4) # # #Computing additive relationship matrix for autooctaploids (Kerr 2012): # Amatrix(ped.mrode, ploidy=8) # # #Computing additive relationship matrix for autotetraploids # # and double-reduction of 0.1 (Kerr 2012): # Amatrix(ped.mrode, ploidy=4, w=0.1) # # #Computing additive relationship matrix for autotetraploids # # and double-reduction of 0.1 as in Slater et al. (2014): # Amatrix(ped.mrode, ploidy=4, w=0.1, slater = TRUE) # #not recommended, but kept in the package to reproduce some former analysis # # #Computing additive relationship matrix for autohexaploids # # and double-reduction of 0.1 (Kerr 2012): # Amatrix(ped.mrode, ploidy=6, w=0.1) ## ---- eval=FALSE-------------------------------------------------------------- # ?Amatrix ## ----------------------------------------------------------------------------- data(snp.pine) snp.pine[1:5,1:5] str(snp.pine) ## ---- eval=FALSE-------------------------------------------------------------- # #Computing the additive relationship matrix based on VanRaden 2008 # G_VanRadenPine <- Gmatrix(SNPmatrix=snp.pine, missingValue=-9, # maf=0.05, method="VanRaden") # # #Computing the additive relationship matrix based on Yang 2010 # G_YangPine <- Gmatrix(SNPmatrix=snp.pine, missingValue=-9, # maf=0.05, method="Yang") # # #Computing the dominance relationship matrix based on Su 2012 # G_SuPine <- Gmatrix(SNPmatrix=snp.pine, missingValue=-9, # maf=0.05, method="Su") # # #Computing the dominance relationship matrix based on Vitezica 2013 # G_VitezicaPine <- Gmatrix(SNPmatrix=snp.pine, missingValue=-9, # maf=0.05, method="Vitezica") ## ---- eval=FALSE-------------------------------------------------------------- # ?Gmatrix ## ---- eval=FALSE-------------------------------------------------------------- # #Loading the data # data(snp.sol) # str(snp.sol) # # #Computing the additive relationship matrix based on VanRaden 2008 # # adapted by Ashraf 2016 # G_VanRaden <- Gmatrix(snp.sol, method="VanRaden", ploidy=4) # # #Computing the dominance (digenic) matrix based on Endelman 2018 (Eq. 19) # G_Dominance <- Gmatrix(snp.sol, method="Endelman", ploidy=4) # # #Computing the full-autopolyploid matrix based on Slater 2016 (Eq. 8 # #and 9) # G_FullAutopolyploid <- Gmatrix(snp.sol, method="Slater", ploidy=4) # # #Computing the pseudodiploid matrix based on Slater 2016 (Eq. 
5, 6, # #and 7) # G_Pseudodiploid <- Gmatrix(snp.sol, method="VanRaden", ploidy=4, pseudo.diploid=TRUE) # # #Computing G matrix with specific weight for each marker as # # in Liu et al. (2020). # Gmatrix_weighted <- Gmatrix(snp.sol, method="VanRaden", weights = runif(3895,0.001,0.1), ploidy=4) ## ---- eval=FALSE-------------------------------------------------------------- # ?Gmatrix ## ---- eval=FALSE-------------------------------------------------------------- # #Loading the data # library(AGHmatrix) # data(snp.sol) # snp.sol.ratio = snp.sol/4 #transforming it in a ratio of the minor allele frequency # Gmatrix <- Gmatrix(snp.sol, method="VanRaden", ploidy=4, ratio=FALSE) # Gmatrix.ratio <- Gmatrix(snp.sol.ratio, method="VanRaden", ploidy=4, ratio=TRUE) # Gmatrix[1:5,1:5]==Gmatrix.ratio[1:5,1:5] # # ## it also has the ploidy.correction option # Gmatrix.alternative <- Gmatrix(snp.sol, # method="VanRaden", # ploidy=4, # ratio=FALSE, # ploidy.correction=TRUE) # # Gmatrix.ratio.alternative <- Gmatrix(snp.sol.ratio, # method="VanRaden", # ploidy=4, # ratio=TRUE, # ploidy.correction=TRUE) # Gmatrix[1:5,1:5]==Gmatrix.alternative[1:5,1:5] # Gmatrix.alternative[1:5,1:5]==Gmatrix.ratio.alternative[1:5,1:5] ## ---- eval=FALSE-------------------------------------------------------------- # data(ped.sol) # data(snp.sol) # # #Computing the numerator relationship matrix 10% of double-reduction # Amat <- Amatrix(ped.sol, ploidy=4, w = 0.1) # Gmat <- Gmatrix(snp.sol, ploidy=4, # maf=0.05, method="VanRaden") # Gmat <- round(Gmat,3) #see appendix # # #Computing H matrix (Martini) # Hmat_Martini <- Hmatrix(A=Amat, G=Gmat, method="Martini", # ploidy=4, missingValue=-9, maf=0.05) # # #Computing H matrix (Munoz) # Hmat_Munoz <- Hmatrix(A=Amat, G=Gmat, markers = snp.sol, # ploidy=4, method="Munoz", # missingValue=-9, maf=0.05) ## ---- eval=FALSE-------------------------------------------------------------- # data(snp.pine) # A <- Gmatrix(SNPmatrix=snp.pine, method="VanRaden", missingValue=-9, maf=0.05) # D <- Gmatrix(SNPmatrix=snp.pine, method="Vitezica", missingValue=-9,maf=0.05) ## ---- eval=FALSE-------------------------------------------------------------- # #Additive-by-Additive Interactions # A_A <- A*A # #Dominance-by-Additive Interactions # D_A <- D*A # #Dominance-by-Dominance Interactions # D_D <- D*D ## ---- eval=FALSE-------------------------------------------------------------- # #Additive-by-Additive-by-Additive Interactions # A_A_A <- A*A*A # #Additive-by-Additive-by-Dominance Interactions # A_A_D <- A*A*D # #Additive-by-Dominance-by-Dominance Interactions # A_D_D <- A*D*D # #Dominance-by-Dominance-by-Dominance Interactions # D_D_D <- D*D*D ## ---- eval=FALSE-------------------------------------------------------------- # #Loading the data example # data(ped.mrode) # # #Computing the matrix # A <- Amatrix(data=ped.mrode, ploidy=4, w=0.1) # # #Building its inverse # Ainv <- solve(A) # # #Exporting it. 
The function "formatmatrix" # # will convert it and save in your working directory # formatmatrix(Ainv, round.by=12, exclude.0=TRUE, name="Ainv") ## ----------------------------------------------------------------------------- pedigree = data.frame(id=1:8, parent1 = c(0,0,0,0,1,2,3,5), parent2 = c(0,0,0,0,2,3,4,6), parent3 = c(0,0,0,0,3,4,0,7), parent4 = c(0,0,0,0,0,0,0,1), parent5 = 0) print(pedigree) AmatrixPolyCross(pedigree) ## ----------------------------------------------------------------------------- AmatrixPolyCross(pedigree,fixedParent=TRUE) ## ---- eval=TRUE,echo=FALSE---------------------------------------------------- x = c(1000,5000,10000,20000,30000,40000,50000,60000,70000,80000,90000,100000)/1000 #Pedigree Size y = c(252156,622500,1795260,6481064,14313448,25227680,49081224,70622336,96017144,125320048,158444856,194731908)/1e+6 #RAM GB ytime = c(0.0025, 0.080, 0.2, 0.89, 1.62,3.01,4.52,7.12,9.15,13.13,15.13,20) #minutes df = data.frame(size=x,ram=y,time=ytime) plot(x=df$size,y=df$ram, ylab = "RAM (GB) at the peak of Amatrix() function", xlab = "Pedigree size (in 1,000 rows)", type="b", axes=FALSE) axis(side = 2, at = c(0,4,8,16,32,48,64,96,144,192),cex.axis=.75) axis(side = 1, at = c(1,5,10,20,30,40,50,60,70,80,90,100),cex.axis=.75) plot(x=df$size,y=df$time, type="b", ylab = "Time to run (minutes) the Amatrix() function", xlab = "Pedigree size (in 1,000 rows)", axes=FALSE) axis(side = 2, at = seq(0,20,2),cex.axis=.75) axis(side = 1, at = c(1,5,10,20,30,40,50,60,70,80,90,100),cex.axis=.75) ## ----eval=FALSE,echo=FALSE---------------------------------------------------- # #To knit an this vignette into an .R file # knitr::purl("vignettes/Tutorial_AGHmatrix.Rmd") ## ----------------------------------------------------------------------------- sessionInfo()
/scratch/gouwar.j/cran-all/cranData/AGHmatrix/inst/doc/Tutorial_AGHmatrix.R
---
title: "AGHmatrix Tutorial"
author: "Rodrigo Amadeu"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{AGHmatrix Tutorial}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
---

```{r knitr_init, echo=FALSE, cache=FALSE}
library(knitr)
library(rmarkdown)
knitr::opts_chunk$set(collapse = TRUE,
                      comment = "#>",
                      fig.width = 7,
                      fig.height = 8,
                      fig.align = "center",
                      dev = "png",
                      dpi = 72,
                      cache = TRUE)
```

```{r, echo=FALSE, results='hide'}
library(AGHmatrix)
```

## Contact
Rodrigo R Amadeu
rramadeu at gmail dot com
https://rramadeu.github.io

## Overview
AGHmatrix is an R package to build relationship matrices using pedigree (A matrix) and/or molecular markers (G matrix), with the possibility of building a combined matrix of pedigree corrected by molecular information (H matrix). The package works with diploid and autopolyploid data.

## Matrices computation implemented in the `AGHmatrix`

Currently the package computes the following 17 different relationship matrices:

### Pedigree-based relationship matrix (A matrix)

|                   | Additive                   | Non-Additive     |
|-------------------|----------------------------|------------------|
| **Diploid**       | Henderson (1976)           | Cockerham (1954) |
| **Autopolyploid** | Kerr (2012), Slater (2013) |                  |

### Molecular-based relationship matrix (G matrix)

|                   | Additive                                  | Non-Additive                   |
|-------------------|-------------------------------------------|--------------------------------|
| **Diploid**       | Yang (2010), VanRaden (2012), Liu (2020)  | Su (2012), Vitezica (2013)     |
| **Polyploid**     | Slater (2016), de Bem Oliveira (2019)     | Slater (2016), Endelman (2018) |

### Combined pedigree and molecular-based relationship matrix (H matrix)

| **Any ploidy/effect**                        |
|----------------------------------------------|
| Legarra (2009), Munoz (2014), Martini (2018) |

Additionally, there is a beta implementation to compute the A matrix when parentage is not deterministic, as in a polycross design. See `?AmatrixPolyCross`.

## Citation
To cite this R package:

Amadeu RR, Garcia AA, Munoz PR, Ferrão LF. AGHmatrix: genetic relationship matrices in R. Bioinformatics. 2023 Jul 1;39(7):btad445. https://doi.org/10.1093/bioinformatics/btad445

## Installing and loading
Within R:

```{r, eval=FALSE}
## Install stable version
install.packages("AGHmatrix")

## Install development version
install.packages("devtools")
devtools::install_github("rramadeu/AGHmatrix")

## Load
library(AGHmatrix)
```

## Relationship matrices using pedigree data - A matrix
`Amatrix` processes the pedigree and builds the A matrix related to that given pedigree. The matrix is built based on the recursive method presented in Mrode (2014) and described by Henderson (1976). This method is expanded for higher ploidies (n-ploidy) as detailed in Kerr et al. (2012). After loading the package you have to load your data file into the software. To do this, you can use the function `read.table()` or `read.csv()`, for example. Your data should be available in R as a `data.frame` structure in the following order: column 1 must be the individual/genotype names (id), and columns 2 and 3 must be the parent names. For the algorithm, it does not matter who is the mother and who is the father (so, no sex column). There is a pedigree data example (`ped.mrode`) that can be used to look at the structure and order of the data.
To load `ped.mrode`:

```{r}
data(ped.mrode)
ped.mrode
str(ped.mrode) #check the structure
```

The example `ped.mrode` has 3 columns: column 1 contains the names of the individuals/genotypes, column 2 contains the names of the first parent, and column 3 contains the names of the second parent (example from Table 2.1 of Mrode 2014). There is no header, and the unknown value must be coded as 0. Your data has to be in the same format as `ped.mrode`.

Internally the algorithm first pre-processes the pedigree: the individuals are numbered $1$ to $n$. Then, it verifies whether the genotypes in the pedigree are in chronological order (i.e., whether the parents of a given individual are located before this individual in the pedigree data set). If this order is not followed, the algorithm performs the necessary changes to correct it in an iterative way. After this pre-processing, the matrix computation proceeds as in Henderson (1976) for diploids - for additive or dominance relationships - and as in Kerr et al. (2012) for autotetraploids - for additive relationships. For autotetraploids, there is the option to include the double-reduction fraction. For diploids, there is the option to compute the dominance relationship matrix (Cockerham, 1954).

Some usage examples with `ped.mrode` follow:

```{r, eval=FALSE}
#Computing additive relationship matrix for diploids (Henderson 1976):
Amatrix(ped.mrode, ploidy=2)

#Computing dominant relationship matrix for diploids (Cockerham 1954):
Amatrix(ped.mrode, ploidy=2, dominance=TRUE)

#Computing additive relationship matrix for autotetraploids (Kerr 2012):
Amatrix(ped.mrode, ploidy=4)

#Computing additive relationship matrix for autooctaploids (Kerr 2012):
Amatrix(ped.mrode, ploidy=8)

#Computing additive relationship matrix for autotetraploids
# and double-reduction of 0.1 (Kerr 2012):
Amatrix(ped.mrode, ploidy=4, w=0.1)

#Computing additive relationship matrix for autotetraploids
# and double-reduction of 0.1 as in Slater et al. (2014):
Amatrix(ped.mrode, ploidy=4, w=0.1, slater = TRUE)
#not recommended, but kept in the package to reproduce some former analysis

#Computing additive relationship matrix for autohexaploids
# and double-reduction of 0.1 (Kerr 2012):
Amatrix(ped.mrode, ploidy=6, w=0.1)
```

More information about `Amatrix` can be found with:

```{r, eval=FALSE}
?Amatrix
```

## Diploid G matrix: relationship matrices using the molecular data
`Gmatrix` handles the molecular-marker matrix and builds the relationship matrix. Molecular marker data should be organized in a matrix format (individuals in rows and markers in columns), coded as 0, 1, 2, plus a missing data value (numeric or `NA`). Import your molecular marker data into `R` with the function `read.table()` or `read.csv()` and convert it to a matrix format with the function `as.matrix()`. The function `Gmatrix` can be used to construct the additive relationship matrix either as proposed by Yang et al. (2010) or as proposed by VanRaden (2008). The function can also construct the dominance relationship matrix either as proposed by Su et al. (2012) or as proposed by Vitezica et al. (2013). As an example, here we build the four matrices using real data from Resende et al. (2012).

To load `snp.pine` and check its structure:

```{r}
data(snp.pine)
snp.pine[1:5,1:5]
str(snp.pine)
```

In this dataset, we have 926 individuals with 4853 markers, and the missing data value is `-9`. Some examples with the `snp.pine` data follow, where the missing value (`missingValue`) is `-9`.
```{r, eval=FALSE}
#Computing the additive relationship matrix based on VanRaden 2008
G_VanRadenPine <- Gmatrix(SNPmatrix=snp.pine, missingValue=-9,
                          maf=0.05, method="VanRaden")

#Computing the additive relationship matrix based on Yang 2010
G_YangPine <- Gmatrix(SNPmatrix=snp.pine, missingValue=-9,
                      maf=0.05, method="Yang")

#Computing the dominance relationship matrix based on Su 2012
G_SuPine <- Gmatrix(SNPmatrix=snp.pine, missingValue=-9,
                    maf=0.05, method="Su")

#Computing the dominance relationship matrix based on Vitezica 2013
G_VitezicaPine <- Gmatrix(SNPmatrix=snp.pine, missingValue=-9,
                          maf=0.05, method="Vitezica")
```

More information about `Gmatrix` can be found with:

```{r, eval=FALSE}
?Gmatrix
```

## Autopolyploid G matrix: relationship matrices using the molecular data
Molecular marker data should be organized in a matrix format (individuals in rows and markers in columns) coded according to the dosage level: 0, 1, 2, ..., ploidy level, plus a missing data value (numeric user-defined or `NA`). As an example, an autotetraploid should be coded as 0, 1, 2, 3, 4, plus a missing data value. In autopolyploids, the function `Gmatrix` can be used to construct: i) the additive relationship based on VanRaden (2008) and extended by Ashraf (2016); ii) the full-autopolyploid model, including additive and non-additive effects, as in equations 8 and 9 of Slater et al. (2016); iii) the pseudo-diploid model as in equations 5, 6, and 7 of Slater et al. (2016); and iv) the digenic-dominant model based on Endelman et al. (2018). As an example, here we build the matrices using data from Endelman et al. (2018) (`snp.sol`). There is also an option to build weighted relationship matrices as in Liu et al. (2020).

The argument `ploidy.correction` defines the denominator of the formula for the `VanRaden` method. If `ploidy.correction=TRUE`, it uses the parametric correction $\sum_i p f_i(1-f_i)$, where $p$ is the ploidy level and $f_i$ is the minor allele frequency of the $i$-th marker. If `ploidy.correction=FALSE`, it uses the sampling variance correction $\sum_i \frac{1}{p} s^2(m_i)$, where $s^2(m_i)$ is the sampling variance of the $i$-th marker. Both corrections are equivalent as the sample size goes to infinity. The default is to use the sampling variance as the correction (i.e., `ploidy.correction=FALSE`).

```{r, eval=FALSE}
#Loading the data
data(snp.sol)
str(snp.sol)

#Computing the additive relationship matrix based on VanRaden 2008
# adapted by Ashraf 2016
G_VanRaden <- Gmatrix(snp.sol, method="VanRaden", ploidy=4)

#Computing the dominance (digenic) matrix based on Endelman 2018 (Eq. 19)
G_Dominance <- Gmatrix(snp.sol, method="Endelman", ploidy=4)

#Computing the full-autopolyploid matrix based on Slater 2016 (Eq. 8
# and 9)
G_FullAutopolyploid <- Gmatrix(snp.sol, method="Slater", ploidy=4)

#Computing the pseudodiploid matrix based on Slater 2016 (Eq. 5, 6,
# and 7)
G_Pseudodiploid <- Gmatrix(snp.sol, method="VanRaden", ploidy=4, pseudo.diploid=TRUE)

#Computing G matrix with a specific weight for each marker as
# in Liu et al. (2020):
Gmatrix_weighted <- Gmatrix(snp.sol, method="VanRaden",
                            weights = runif(3895,0.001,0.1), ploidy=4)
```
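A brief usage note on `weights`: the `runif()` call above is just a placeholder, and the hard-coded 3895 suggests one weight per marker, so in practice it is safer to size the vector from the data itself (a sketch under that assumption):

```{r, eval=FALSE}
data(snp.sol)
w <- runif(ncol(snp.sol), 0.001, 0.1) #placeholder weights, one per marker
G_weighted <- Gmatrix(snp.sol, method = "VanRaden", ploidy = 4, weights = w)
```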
More information about `Gmatrix` can be found with:

```{r, eval=FALSE}
?Gmatrix
```

## Ratio (non-dosage) G matrix: relationship matrices using the molecular data without dosage calling
Molecular marker data should be organized in a matrix format (individuals in rows and markers in columns) coded as a fraction between 0 and 1 that represents the molecular information of each individual-marker combination. Such a ratio can represent the count of alternative alleles over the read depth (GBS-like techniques), or the signal of the alternative allele over the sum of the signals of the alternative and reference alleles (GCMS-like techniques). It can also be used for family-pool genotypes.

```{r, eval=FALSE}
#Loading the data
library(AGHmatrix)
data(snp.sol)
snp.sol.ratio = snp.sol/4 #transforming dosages into allele ratios

Gmat <- Gmatrix(snp.sol, method="VanRaden", ploidy=4, ratio=FALSE)
Gmat.ratio <- Gmatrix(snp.sol.ratio, method="VanRaden", ploidy=4, ratio=TRUE)
Gmat[1:5,1:5]==Gmat.ratio[1:5,1:5]

## it also has the ploidy.correction option
Gmat.alternative <- Gmatrix(snp.sol, method="VanRaden",
                            ploidy=4, ratio=FALSE, ploidy.correction=TRUE)
Gmat.ratio.alternative <- Gmatrix(snp.sol.ratio, method="VanRaden",
                                  ploidy=4, ratio=TRUE, ploidy.correction=TRUE)
Gmat[1:5,1:5]==Gmat.alternative[1:5,1:5]
Gmat.alternative[1:5,1:5]==Gmat.ratio.alternative[1:5,1:5]
```

## Combined relationship matrix - H matrix
The H matrix is the relationship matrix built from combined pedigree and genomic information. First, you need to compute the A and G matrices separately and then use them as input to build the combined H matrix. Two methods are implemented: `Munoz` shrinks the G matrix towards the A matrix, scaling the molecular relatedness by each relationship class; `Martini` is a modified version of Legarra et al. (2009), which combines the A and G matrices using scaling factors. As an example, here we build the matrices using data from Endelman et al. (2018) (`ped.sol` and `snp.sol`).

```{r, eval=FALSE}
data(ped.sol)
data(snp.sol)

#Computing the numerator relationship matrix with 10% of double-reduction
Amat <- Amatrix(ped.sol, ploidy=4, w = 0.1)
Gmat <- Gmatrix(snp.sol, ploidy=4, maf=0.05, method="VanRaden")
Gmat <- round(Gmat,3) #see appendix

#Computing H matrix (Martini)
Hmat_Martini <- Hmatrix(A=Amat, G=Gmat, method="Martini",
                        ploidy=4, missingValue=-9, maf=0.05)

#Computing H matrix (Munoz)
Hmat_Munoz <- Hmatrix(A=Amat, G=Gmat, markers = snp.sol,
                      ploidy=4, method="Munoz",
                      missingValue=-9, maf=0.05)
```

## Covariance matrices due to epistatic terms
Here we present how to compute the epistasis relationship matrices using Hadamard products (i.e., cell-by-cell products), denoted by `*`. For more information please see Munoz et al. (2014). In this example we are using the molecular-based relationship matrices.
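As a quick standalone reminder of the operator (unrelated to any AGHmatrix internals), `*` between two R matrices multiplies cell by cell, which is not the same as the matrix product `%*%`:

```{r, eval=FALSE}
M <- matrix(1:4, nrow = 2)
M * M      #Hadamard (cell-by-cell) product
M %*% M    #ordinary matrix product, shown for contrast
```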
First, build the additive and dominance matrices:

```{r, eval=FALSE}
data(snp.pine)
A <- Gmatrix(SNPmatrix=snp.pine, method="VanRaden", missingValue=-9, maf=0.05)
D <- Gmatrix(SNPmatrix=snp.pine, method="Vitezica", missingValue=-9, maf=0.05)
```

For the first-degree epistatic terms:

```{r, eval=FALSE}
#Additive-by-Additive Interactions
A_A <- A*A
#Dominance-by-Additive Interactions
D_A <- D*A
#Dominance-by-Dominance Interactions
D_D <- D*D
```

For the second-degree epistatic terms:

```{r, eval=FALSE}
#Additive-by-Additive-by-Additive Interactions
A_A_A <- A*A*A
#Additive-by-Additive-by-Dominance Interactions
A_A_D <- A*A*D
#Additive-by-Dominance-by-Dominance Interactions
A_D_D <- A*D*D
#Dominance-by-Dominance-by-Dominance Interactions
D_D_D <- D*D*D
```

And so on...

## Exporting your matrix as three columns and sparse format (ASReml - csv format)
That is, the lower-diagonal matrix formatted in three columns in .csv format (other ASCII extensions could be used as well). In order to do this, we need to build the matrix and its inverse, and export the inverse using the `formatmatrix` function. ASReml can invert the relationship matrix as well, probably more efficiently than R (i.e., the `solve()` function) for large matrices, so there is no need to invert the matrix in R if the matrix is large. This function has as options: `round.by`, which lets you decide the number of decimals you want; `exclude.0`, which, if `TRUE`, removes all the zeros from your data (i.e., transforms it into a sparse format); and `name`, which defines the name to be used in the exported file. Use the defaults if you are not sure which parameters to use in this function. Here is an example using the `ped.mrode` data:

```{r, eval=FALSE}
#Loading the data example
data(ped.mrode)

#Computing the matrix
A <- Amatrix(data=ped.mrode, ploidy=4, w=0.1)

#Building its inverse
Ainv <- solve(A)

#Exporting it. The function "formatmatrix"
# will convert it and save it in your working directory
formatmatrix(Ainv, round.by=12, exclude.0=TRUE, name="Ainv")
```

## Relationship matrices using pedigree data for polycrosses - A matrix (beta)
`AmatrixPolyCross` creates an additive relationship matrix A based on a non-deterministic pedigree with 4+ columns, where each column represents a possible parent. This function was built with the following designs in mind.

1) A mating design where the possible parents are equally likely. For example, a generation of insects derived from the mating of three insects in a cage: all the insects in this generation will have the same expected relatedness with all the possible parents (1/3). If there are only two parents in the cage, the function assumes no inbreeding and the pedigree is deterministic (the individual is offspring of the cross between the two parents). Another example: a population of 10 open-pollinated plants where you harvest the seeds without tracking the mother.

2) When `fixedParent` is TRUE: a mating design where you know one parent and might know the other possible parents. For example, a polycross design where you have seeds harvested from a mother plant and a set of possible pollen donors.

The following pedigree has the id of the individual followed by its possible parents. The possible parents are filled from left to right in the `pedigree` data frame: ids 1, 2, 3, and 4 have unknown parents and are assumed unrelated; id 5 has three possible parents (1, 2, 3); id 6 has three possible parents (2, 3, 4); id 7 has two parents (a deterministic case here: the parents are 3 and 4); id 8 has four possible parents (5, 6, 7, 1).
```{r}
pedigree = data.frame(id=1:8,
                      parent1 = c(0,0,0,0,1,2,3,5),
                      parent2 = c(0,0,0,0,2,3,4,6),
                      parent3 = c(0,0,0,0,3,4,0,7),
                      parent4 = c(0,0,0,0,0,0,0,1),
                      parent5 = 0)
print(pedigree)
AmatrixPolyCross(pedigree)
```

If `fixedParent=TRUE`, the above pedigree will be interpreted with the possible parents filled from left to right after the known parent in the `pedigree` data frame: ids 1, 2, 3, and 4 have unknown parents and are assumed unrelated; id 5 is offspring of parent 1 in a deterministic way, with two other possible parents (2, 3); id 6 is offspring of parent 2 in a deterministic way, with two other possible parents (3, 4); id 7 has two parents, as before (a deterministic case: the parents are 3 and 4); id 8 is offspring of parent 5 in a deterministic way and has three other possible parents (6, 7, 1).

```{r}
AmatrixPolyCross(pedigree,fixedParent=TRUE)
```

## Amatrix() benchmark
A small memory and computational-time profile of the `Amatrix()` function follows. The required RAM was computed based on the peak of the process for different pedigree sizes (based on `/usr/bin/time -v` output). The time profiling was done using an AMD Milan 2.95GHz processor, so the values may be underestimates compared with lower-speed processors. Numerator relationship matrices for pedigrees with fewer than 20,000 rows can be built on low-spec user-end machines (<8GB RAM) using our package.

```{r, eval=TRUE,echo=FALSE}
x = c(1000,5000,10000,20000,30000,40000,50000,60000,70000,80000,90000,100000)/1000 #Pedigree Size
y = c(252156,622500,1795260,6481064,14313448,25227680,49081224,70622336,96017144,125320048,158444856,194731908)/1e+6 #RAM GB
ytime = c(0.0025, 0.080, 0.2, 0.89, 1.62,3.01,4.52,7.12,9.15,13.13,15.13,20) #minutes
df = data.frame(size=x,ram=y,time=ytime)

plot(x=df$size,y=df$ram,
     ylab = "RAM (GB) at the peak of Amatrix() function",
     xlab = "Pedigree size (in 1,000 rows)",
     type="b", axes=FALSE)
axis(side = 2, at = c(0,4,8,16,32,48,64,96,144,192),cex.axis=.75)
axis(side = 1, at = c(1,5,10,20,30,40,50,60,70,80,90,100),cex.axis=.75)

plot(x=df$size,y=df$time, type="b",
     ylab = "Time to run (minutes) the Amatrix() function",
     xlab = "Pedigree size (in 1,000 rows)",
     axes=FALSE)
axis(side = 2, at = seq(0,20,2),cex.axis=.75)
axis(side = 1, at = c(1,5,10,20,30,40,50,60,70,80,90,100),cex.axis=.75)
```
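If you want to gauge your own machine before committing to a very large pedigree, a rough timing sketch along these lines may help (the chained pedigree below is synthetic and purely illustrative):

```{r, eval=FALSE}
n <- 2000
ped <- data.frame(id      = 1:n,
                  parent1 = c(0, 0, 1:(n - 2)),
                  parent2 = c(0, 0, 2:(n - 1)))
system.time(A <- Amatrix(ped, ploidy = 2))
```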
## Bibliography
Amadeu, RR, et al. 2023 AGHmatrix: genetic relationship matrices in R. Bioinformatics 39, 7.

Amadeu, RR, et al. 2016 AGHmatrix: R package to construct relationship matrices for autotetraploid and diploid species: a blueberry example. The Plant Genome 9, 4.

Ashraf, BH, et al. 2016 Estimating genomic heritabilities at the level of family-pool samples of perennial ryegrass using genotyping-by-sequencing. Theoretical and Applied Genetics 129, 45-52.

Cockerham, CC. 1954 An extension of the concept of partitioning hereditary variance for analysis of covariances among relatives when epistasis is present. Genetics 39, 859–882.

de Bem Oliveira, I, et al. 2019 Genomic prediction of autotetraploids; influence of relationship matrices, allele dosage, and continuous genotyping calls in phenotype prediction. G3: Genes, Genomes, Genetics 9(4), 1189-1198.

Endelman, JB, et al. 2018 Genetic variance partitioning and genome-wide prediction with allele dosage information in autotetraploid potato. Genetics 209, 77-87.

Hamilton, MG, et al. 2017 Computation of the inverse additive relationship matrix for autopolyploid and multiple-ploidy populations. Theoretical and Applied Genetics 131, 851-890.

Henderson, C. 1976 A simple method for computing the inverse of a numerator relationship matrix used in prediction of breeding values. Biometrics 32, 69–83.

Kerr, RJ, et al. 2012 Use of the numerator relationship matrix in genetic analysis of autopolyploid species. Theoretical and Applied Genetics 124, 1271–1282.

Legarra, A, et al. 2009 A relationship matrix including full pedigree and genomic information. Journal of Dairy Science 92, 4656–4663.

Liu, A, et al. 2020 Weighted single-step genomic best linear unbiased prediction integrating variants selected from sequencing data by association and bioinformatics analyses. Genetics Selection Evolution 52, 48.

Martini, JW, et al. 2018 The effect of the H$^{-1}$ scaling factors $\tau$ and $\omega$ on the structure of H in the single-step procedure. Genetics Selection Evolution 50(1), 16.

Mrode, RA. 2014 *Linear models for the prediction of animal breeding values*. Cabi. 3rd ed.

Munoz, PR, et al. 2014 Unraveling additive from nonadditive effects using genomic relationship matrices. Genetics 198, 1759–1768.

R Core Team, 2016 *R*: A Language and Environment for Statistical Computing. R Foundation for Statistical Computing, Vienna, Austria.

Resende, MF, et al. 2012 Accuracy of genomic selection methods in a standard data set of loblolly pine (*Pinus taeda* L.). Genetics 190, 1503–1510.

Slater, AT, et al. 2014 Improving the analysis of low heritability complex traits for enhanced genetic gain in potato. Theoretical and Applied Genetics 127, 809–820.

Slater, AT, et al. 2016 Improving genetic gain with genomic selection in autotetraploid potato. The Plant Genome 9.

Su, G, et al. 2012 Estimating additive and non-additive genetic variances and predicting genetic merits using genome-wide dense single nucleotide polymorphism markers. PloS One 7, e45293. https://doi.org/10.1371/journal.pone.0045293

VanRaden, P. 2008 Efficient methods to compute genomic predictions. Journal of Dairy Science 91, 4414–4423.

Vitezica, ZG, et al. 2013 On the additive and dominant variance and covariance of individuals within the genomic selection scope. Genetics 195, 1223–1230.

Yang, J, et al. 2010 Common SNPs explain a large proportion of the heritability for human height. Nature Genetics 42, 565–569.

```{r,eval=FALSE,echo=FALSE}
#To extract the R code from this vignette into an .R file
knitr::purl("vignettes/Tutorial_AGHmatrix.Rmd")
```

```{r}
sessionInfo()
```
/scratch/gouwar.j/cran-all/cranData/AGHmatrix/inst/doc/Tutorial_AGHmatrix.Rmd
--- title: "AGHmatrix: An R package to compute relationship matrices for diploid and autopolyploid species" author: "Rodrigo Amadeu" date: "`r Sys.Date()`" output: pdf_document: latex_engine: xelatex toc: TRUE vignette: > %\VignetteIndexEntry{AGHmatrix Tutorial} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r knitr_init, echo=FALSE, cache=FALSE} library(knitr) library(rmarkdown) knitr::opts_chunk$set(collapse = TRUE, comment = "#>", fig.width = 6, fig.height = 6, fig.align = "center", dev = "png", dpi = 36, cache = TRUE) ``` ```{r, echo=FALSE, results='hide'} library(AGHmatrix) ``` ## Contact Rodrigo R Amadeu rramadeu at ufl dot edu Horticultural Sciences Department University of Florida https://rramadeu.github.io ## Overview AGHmatrix software is an R-package to build relationship matrices using pedigree (A matrix) and/or molecular markers (G matrix) with the possibility to build a combined matrix of Pedigree corrected by Molecular (H matrix). The package works with diploid and autopolyploid Data. If you are not familiar with `R`, we recommend the reading of vignette [Introduction to R](http://htmlpreview.github.io/?https://github.com/augusto-garcia/onemap/blob/master/inst/doc/Introduction_R.html). ## Matrices computation implemented in the `AGHmatrix` Currently can compute the following 14 different relationship matrices: ### Pedigree-based relationship matrix (A matrix) | |&nbsp; &nbsp; &nbsp; &nbsp; | Additive |&nbsp; &nbsp; &nbsp; &nbsp; |Non-Additive | |---------------|--------------|:-------------------------:|--------------|:--------------------------:| | **Diploid** |&nbsp; &nbsp; &nbsp; &nbsp; | Henderson (1976) |&nbsp; &nbsp; &nbsp; &nbsp; |Cockerham (1954) | | **Polyploid** |&nbsp; &nbsp; &nbsp; &nbsp; | Kerr (2012), Slater (2013)|&nbsp; &nbsp; &nbsp; &nbsp; || | ### Molecular-based relationship matrix (G matrix) | |&nbsp; &nbsp; &nbsp; &nbsp; | Additive |&nbsp; &nbsp; &nbsp; &nbsp; | Non-Additive | |-----------|--------------|:----------------------------:|----|:---------------------------:| | **Diploid** |&nbsp; &nbsp; &nbsp; &nbsp; | Yang (2010), VanRaden (2012) |&nbsp; &nbsp; &nbsp; &nbsp; | Su (2012), Vitezica (2013) | | **Polyploid** |&nbsp; &nbsp; &nbsp; &nbsp; | Slater (2016), VanRaden (2012) |&nbsp; &nbsp; &nbsp; &nbsp; | Slater (2016), Endelman (2018) | ### Combined pedigree and molecular-based relationship matrix (H matrix) | **Any ploidy/effect** | |:---------------------------:| | Munoz (2014), Martini (2018) | </center> ## Citation To cite this R package: Amadeu, R. R., C. Cellon, J. W. Olmstead, A. A. F. Garcia, M. F. R. Resende, and P. R. Munoz. 2016. AGHmatrix: R Package to Construct Relationship Matrices for Autotetraploid and Diploid Species: A Blueberry Example. *The Plant Genome* 9(3). [doi: 10.3835/plantgenome2016.01.0009](http://dx.doi.org/10.3835/plantgenome2016.01.0009) ## Installing and loading Within R: ```{r, eval=FALSE} ## Install stable version install.packages("AGHmatrix") ## Install development version #install.packages("devtools") #devtools::install_github("prmunoz/AGHmatrix") ## Load library(AGHmatrix) ``` ## Relationship matrices using pedigree data - A matrix `Amatrix` process the pedigree and build the A-matrix related to that given pedigree. The matrix is build according to the recursive method presented in Mrode (2014) and described by Henderson (1976). This method is expanded for higher ploidies (n-ploidy) according with Kerr et al. (2012). After loading the package you have to load your data file into the software. 
To do this, you can use the function `read.table()` or `read.csv()`, for example. Your data should be available in R as a `data.frame` structure in the following order: column 1 must be the individual/genotype names (id), columns 2 and 3 must be the parent names. For the algorithm, it does not matter which parent is the mother and which is the father. In the package there is a pedigree data example (`ped.mrode`) that can be used to look at the structure and order of the data.

To load `ped.mrode`:

```{r}
data(ped.mrode)
ped.mrode
str(ped.mrode) #check the structure
```

The example `ped.mrode` has 3 columns: column 1 contains the names of the individuals/genotypes, column 2 contains the names of the first parent, and column 3 contains the names of the second parent. There is no header, and the unknown value must equal 0. Your data has to be in the same format as `ped.mrode`.

Internally in the algorithm, the first step is the pre-processing of the pedigree: the individuals are numbered $1$ to $n$. Then, it is verified whether the genotypes in the pedigree are in chronological order (i.e., whether the parents of a given individual are located prior to this individual in the pedigree dataset). If this order is not followed, the algorithm performs the necessary permutations to correct it. After this pre-processing, the matrix computation proceeds as in Mrode (2014) for diploids - for additive or dominance relationships - and as in Kerr et al. (2012) for autotetraploids - for additive relationships. For autotetraploids there is the option to include a double-reduction fraction (as presented in Slater et al., 2014). For diploids there is the option to compute the non-additive relationship matrix (Cockerham, 1954).

Some usage examples with `ped.mrode` follow.

```{r, eval=FALSE}
#Computing additive relationship matrix for diploids:
Amatrix(ped.mrode, ploidy=2)

#Computing non-additive relationship matrix considering diploidy:
Amatrix(ped.mrode, ploidy=2, dominance=TRUE)

#Computing additive relationship matrix considering autotetraploidy:
Amatrix(ped.mrode, ploidy=4)

#Computing additive relationship matrix considering autooctaploidy:
Amatrix(ped.mrode, ploidy=8)

#Computing additive relationship matrix considering autotetraploidy
# and double-reduction of 10%:
Amatrix(ped.mrode, ploidy=4, w=0.1)

#Computing additive relationship matrix considering autotetraploidy
# and double-reduction of 10% as Slater et al. (2014):
Amatrix(ped.mrode, ploidy=4, w=0.1, slater = TRUE)

#Computing additive relationship matrix considering autohexaploidy
# and double-reduction of 10%:
Amatrix(ped.mrode, ploidy=6, w=0.1)
```

More information about `Amatrix` can be found with:

```{r, eval=FALSE}
?Amatrix
```

## Relationship matrices using the molecular data - G matrix for diploids
`Gmatrix` handles the molecular-marker matrix and builds the relationship matrix. Molecular marker data should be organized in a matrix format (individuals in rows and markers in columns) coded as 0, 1, 2, plus a missing data value (numeric or `NA`). Import your molecular marker data into `R` with the function `read.table()` or `read.csv()` and convert it to a matrix format with the function `as.matrix()`. The function `Gmatrix` can be used to construct the additive relationship matrix either as proposed by Yang et al. (2010) or as proposed by VanRaden (2008). The function can also construct the dominance relationship matrix either as proposed by Su et al. (2012) or as proposed by Vitezica et al. (2013). As an example, here we build the four matrices using real data from Resende et al. (2012).
To load `snp.pine` and to check its structure:

```{r}
data(snp.pine)
snp.pine[1:5,1:5]
str(snp.pine)
```

In this dataset we have 926 individuals with 4853 markers, and the missing data value is `-9`. Some usage examples with the `snp.pine` data follow, where the unknown value (`missingValue`) is `-9`. Here we set the minimum allele frequency to `0.05`.

```{r, eval=FALSE}
#Computing the additive relationship matrix based on VanRaden 2008
G_VanRadenPine <- Gmatrix(SNPmatrix=snp.pine, missingValue=-9,
                          maf=0.05, method="VanRaden")

#Computing the additive relationship matrix based on Yang 2010
G_YangPine <- Gmatrix(SNPmatrix=snp.pine, missingValue=-9,
                      maf=0.05, method="Yang")

#Computing the dominance relationship matrix based on Su 2012
G_SuPine <- Gmatrix(SNPmatrix=snp.pine, missingValue=-9,
                    maf=0.05, method="Su")

#Computing the dominance relationship matrix based on Vitezica 2013
G_VitezicaPine <- Gmatrix(SNPmatrix=snp.pine, missingValue=-9,
                          maf=0.05, method="Vitezica")
```

More information about `Gmatrix` can be found with:

```{r, eval=FALSE}
?Gmatrix
```

## Relationship matrices using the molecular data - G matrix for autopolyploids
Molecular marker data should be organized in a matrix format (individuals in rows and markers in columns) coded according to the dosage level: 0, 1, 2, ..., ploidy level, plus a missing data value (numeric or `NA`). As an example, an autotetraploid should be coded as 0, 1, 2, 3, 4, plus a missing data value. In autopolyploids, the function `Gmatrix` can be used to construct: i) the additive relationship based on VanRaden (2008) and extended by Ashraf (2016); ii) the full-autopolyploid model, including additive and non-additive effects, as in equations 8 and 9 of Slater et al. (2016); iii) the pseudo-diploid model as in equations 5, 6, and 7 of Slater et al. (2016); and iv) the digenic-dominant model based on Endelman et al. (2018). As an example, here we build the matrices using data from Endelman et al. (2018) (`snp.sol`).

```{r, eval=FALSE}
#Loading the data
data(snp.sol)
str(snp.sol)

#Computing the additive relationship matrix based on VanRaden 2008
# adapted by Ashraf 2016
G_VanRaden <- Gmatrix(snp.sol, method="VanRaden", ploidy=4)

#Computing the dominance (digenic) matrix based on Endelman 2018 (Eq. 19)
G_Dominance <- Gmatrix(snp.sol, method="Endelman", ploidy=4)

#Computing the full-autopolyploid matrix based on Slater 2016 (Eq. 8
# and 9)
G_FullAutopolyploid <- Gmatrix(snp.sol, method="Slater", ploidy=4)

#Computing the pseudodiploid matrix based on Slater 2016 (Eq. 5, 6,
# and 7)
G_Pseudodiploid <- Gmatrix(snp.sol, method="VanRaden", ploidy=4, pseudo.diploid=TRUE)
```

More information about `Gmatrix` can be found with:

```{r, eval=FALSE}
?Gmatrix
```

## Combined relationship matrix - H matrix
The H matrix is the relationship matrix built from combined pedigree and genomic information. First, you need to compute the matrices separately and then use them as input to build the combined H matrix. Two methods are implemented: `Munoz` shrinks the G matrix towards the A matrix, scaling the molecular relatedness by each relationship class; `Martini` is a modified version of Legarra et al. (2009), which combines the A and G matrices using scaling factors. As an example, here we build the matrices using data from Endelman et al. (2018) (`ped.sol` and `snp.sol`).
```{r, eval=FALSE}
data(ped.sol)
data(snp.sol)

#Computing the numerator relationship matrix with 10% of double-reduction
Amat <- Amatrix(ped.sol, ploidy=4, w = 0.1)

#Computing the additive relationship matrix based on VanRaden (modified)
Gmat <- Gmatrix(snp.sol, ploidy=4, missingValue=-9,
                maf=0.05, method="VanRaden")

#Computing H matrix (Martini)
Hmat_Martini <- Hmatrix(A=Amat, G=Gmat, method="Martini",
                        ploidy=4, missingValue=-9, maf=0.05)

#Computing H matrix (Munoz)
Hmat_Munoz <- Hmatrix(A=Amat, G=Gmat, markers = snp.sol,
                      ploidy=4, method="Munoz",
                      missingValue=-9, maf=0.05)
```

## Covariance matrices due to epistatic terms
Here we present how to compute the epistasis relationship matrices using Hadamard products (i.e., cell-by-cell products), denoted by `*`. For more information please see Munoz et al. (2014). In this example we are using the molecular-based relationship matrices.

First, build the additive and dominance matrices:

```{r, eval=FALSE}
data(snp.pine)
A <- Gmatrix(SNPmatrix=snp.pine, method="VanRaden", missingValue=-9, maf=0.05)
D <- Gmatrix(SNPmatrix=snp.pine, method="Vitezica", missingValue=-9, maf=0.05)
```

For the first-degree epistatic terms:

```{r, eval=FALSE}
#Additive-by-Additive Interactions
A_A <- A*A
#Dominance-by-Additive Interactions
D_A <- D*A
#Dominance-by-Dominance Interactions
D_D <- D*D
```

For the second-degree epistatic terms:

```{r, eval=FALSE}
#Additive-by-Additive-by-Additive Interactions
A_A_A <- A*A*A
#Additive-by-Additive-by-Dominance Interactions
A_A_D <- A*A*D
#Additive-by-Dominance-by-Dominance Interactions
A_D_D <- A*D*D
#Dominance-by-Dominance-by-Dominance Interactions
D_D_D <- D*D*D
```

And so on...

## Exporting your matrix as three columns and sparse format (ASReml - csv format)
That is, the lower-diagonal matrix formatted in three columns in .csv format (other ASCII extensions could be used as well). In order to do this, we need to build the matrix and its inverse, and export the inverse using the `formatmatrix` function. ASReml can invert the relationship matrix as well, probably more efficiently than R (i.e., the `solve()` function) for large matrices, so there is no need to invert the matrix in R if the matrix is large. This function has as options: `round.by`, which lets you decide the number of decimals you want; `exclude.0`, which, if `TRUE`, removes all the zeros from your data (i.e., transforms it into a sparse format); and `name`, which defines the name to be used in the exported file. Use the defaults if you are not sure which parameters to use in this function. Here is an example using the `ped.mrode` data:

```{r, eval=FALSE}
#Loading the data example
data(ped.mrode)

#Computing the matrix
A <- Amatrix(data=ped.mrode, ploidy=4, w=0.1)

#Building its inverse
Ainv <- solve(A)

#Exporting it. The function "formatmatrix"
# will convert it and save it in your working directory
formatmatrix(Ainv, round.by=12, exclude.0=TRUE, name="Ainv")
```

## Bibliography
Amadeu, RR, et al., 2016 AGHmatrix: R package to construct relationship matrices for autotetraploid and diploid species: a blueberry example. *The Plant Genome* 9(4). https://doi.org/10.3835/plantgenome2016.01.0009

Ashraf, BH, et al., 2016 Estimating genomic heritabilities at the level of family-pool samples of perennial ryegrass using genotyping-by-sequencing. *Theoretical and Applied Genetics* 129: 45-52. https://doi.org/10.1007/s00122-015-2607-9

Endelman, JB, et al., 2018 Genetic variance partitioning and genome-wide prediction with allele dosage information in autotetraploid potato. *Genetics* 209(1): 77-87.
https://doi.org/10.1534/genetics.118.300685

Hamilton, MG, et al., 2017 Computation of the inverse additive relationship matrix for autopolyploid and multiple-ploidy populations. *Theoretical and Applied Genetics*. https://doi.org/10.1007/s00122-017-3041-y

Henderson, C, 1976 A simple method for computing the inverse of a numerator relationship matrix used in prediction of breeding values. *Biometrics* pp. 69–83. https://doi.org/10.2307/2529339

Kerr, RJ, et al., 2012 Use of the numerator relationship matrix in genetic analysis of autopolyploid species. *Theoretical and Applied Genetics* 124: 1271–1282. https://doi.org/10.1007/s00122-012-1785-y

Martini, JW, et al., 2018 The effect of the H$^{-1}$ scaling factors $\tau$ and $\omega$ on the structure of H in the single-step procedure. *Genetics Selection Evolution* 50(1), 16. https://doi.org/10.1186/s12711-018-0386-x

Mrode, R. A., 2014 *Linear models for the prediction of animal breeding values*. Cabi. 3rd ed.

Munoz, PR, et al., 2014 Unraveling additive from nonadditive effects using genomic relationship matrices. *Genetics* 198: 1759–1768. https://doi.org/10.1534/genetics.114.171322

R Core Team, 2016 *R*: A Language and Environment for Statistical Computing. R Foundation for Statistical Computing, Vienna, Austria.

Resende, MF, et al., 2012 Accuracy of genomic selection methods in a standard data set of loblolly pine (*Pinus taeda* L.). *Genetics* 190: 1503–1510. https://doi.org/10.1534/genetics.111.137026

Slater, AT, et al., 2014 Improving the analysis of low heritability complex traits for enhanced genetic gain in potato. *Theoretical and Applied Genetics* 127: 809–820. https://doi.org/10.1007/s00122-013-2258-7

Slater, AT, et al., 2016 Improving genetic gain with genomic selection in autotetraploid potato. *The Plant Genome* 9. https://doi.org/10.3835/plantgenome2016.02.0021

Su, G, et al., 2012 Estimating additive and non-additive genetic variances and predicting genetic merits using genome-wide dense single nucleotide polymorphism markers. *PloS One* 7: e45293. https://doi.org/10.1371/journal.pone.0045293

VanRaden, P, 2008 Efficient methods to compute genomic predictions. *Journal of Dairy Science* 91: 4414–4423. https://doi.org/10.3168/jds.2007-0980

Vitezica, ZG, et al., 2013 On the additive and dominant variance and covariance of individuals within the genomic selection scope. *Genetics* 195: 1223–1230. https://doi.org/10.1534/genetics.113.155176

Yang, J, et al., 2010 Common SNPs explain a large proportion of the heritability for human height. *Nature Genetics* 42: 565–569. https://doi.org/10.1038/ng.608

```{r,eval=FALSE,echo=FALSE}
#To extract the R code from this vignette into an .R file
knitr::purl("vignettes/Tutorial_AGHmatrix.Rmd")
```

```{r}
sessionInfo()
```
/scratch/gouwar.j/cran-all/cranData/AGHmatrix/inst/misc/Tutorial_AGHmatrix.Rmd
#########################################
#
# Package: AGHmatrix
#
# File: converttofrequency.R
# Contains: converttofrequency
#
# Written by Rodrigo Rampazo Amadeu
#
# First version: Feb-2014
# Last update: 14-Apr-2015
# License: GPL-3
#
#########################################

#' Converts molecular (AA,AB,BB) data to a frequency format (0,0.5,1).
#'
#' Converts molecular data to a frequency format. Molecular data can be coded with numbers, letters, or any ASCII characters.
#'
#' @param file path of your file (individuals in rows and markers in columns; set transpose=TRUE if your file is markers in rows and individuals in columns).
#' @param ploidy ploidy of your data, 2 or 4. Default=2.
#' @param format 1 if the data are coded as 0,1,2,...; 2 if the data are coded as ...,-1,0,1,...; 3 if the data are coded as "BB","AB","AA" or "BBBB","ABBB","AABB","AAAB","AAAA"; 4 if different. Then, you need to specify the categories in the genotype parameter.
#' @param unk unknown value assumed (default=NA).
#' @param genotype used if format=4. Please insert your genotype categories here as a vector, e.g.: genotype=c("CCCC","CCCG","CCGG","CGGG","GGGG"); in this example CCCC will be coded as 0, CCCG as 0.25, CCGG as 0.5, CGGG as 0.75, and GGGG as 1. Default=NULL.
#' @param output the name of the output file. Default="convdata".
#' @param dominant if TRUE, returns the dominant parameterization.
#' @param transpose if TRUE, transposes the input data before processing.
#'
#' @return csv file with individuals in rows and markers in columns, containing frequencies instead of the molecular codes.
#'
#' @examples
#' \dontrun{
#' ## assumes "molecular_data.csv" is a comma-separated file with a first
#' ## row of marker names and a first column of individual names
#' converttofrequency("molecular_data.csv", ploidy=2, format=3)
#' }
#'
#' @author Rodrigo R Amadeu, \email{rramadeu@@gmail.com}
#'
#' @export

converttofrequency <- function(file=NULL,
                               ploidy=2,
                               format=4,
                               unk=NA,
                               genotype=NULL,
                               dominant=FALSE,
                               output="convdata",
                               transpose=FALSE){
  if(!is.null(file)){
    y <- as.matrix(read.csv(file, header=FALSE, sep=","))
  }else(stop(deparse("Select a file name")))

  if(transpose)
    y <- t(y)

  # first column: individual names; first row: marker names
  ind.names <- y[,1]
  ind.names <- c(as.matrix(ind.names[-1]))
  markers <- y[1,]
  markers <- c(as.matrix(markers[-1]))

  cat("Check if the following information is correct.","\n","If not, correct the data ...","\n")
  cat("Considering",length(ind.names),"individual names:",head(ind.names),"...","\n")
  cat("Considering",length(markers),"markers:",head(markers),"...","\n")

  y <- y[-1,-1]
  y[y==unk] <- NA

  if(format==1 || format==2){ #format 1 = 0,1,2; 0,1,2,3,4; ...
    y <- matrix(as.numeric(y), nrow=length(ind.names))
    if(dominant)
      y <- y/ploidy
    if(format==2){ #format 2 = -1,0,1; -2,-1,0,1,2; ...
      y <- y + 0.5
    }
  }

  if(format==3 || format==4){ #format 3 = BB,AB,AA
    if(format==3){
      if(ploidy==2) genotype <- c("BB","AB","AA")
      if(ploidy==4) genotype <- c("BBBB","ABBB","AABB","AAAB","AAAA")
    }
    if(format==4 && is.null(genotype))
      stop(deparse("Choose a format for your data. If equal to 4, indicate the genotype"))
    match.alg <- genotype
    cat("Converting the data to frequency using the following transformation...","\n")
    cat(c(0:(length(genotype)-1))/(length(genotype)-1),"\n")
    cat(genotype,"\n")
    # map each genotype category to 0, 1, ..., (number of categories - 1),
    # then divide by the ploidy to obtain frequencies
    code <- c(0,seq(1:(length(match.alg)-1)))
    for(i in 1:length(match.alg))
      y[y==match.alg[i]] = code[i]
    y <- matrix(as.numeric(y), nrow=length(ind.names))
    y <- y/ploidy
  }

  if(dominant){
    cat("Converting the data to dominant type...","\n")
    cat(c(0:(length(genotype)-1))/(length(genotype)-1),"\n")
    cat(c(0,rep(1,length(genotype)-2),0),"\n")
    # heterozygous classes become 1; both homozygous classes (0 and 1) become 0
    y <- (y!=0)*(y!=1)
  }

  colnames(y) <- markers
  rownames(y) <- ind.names

  if(dominant){
    write.table(y, file=paste(output,"dom.csv",sep=""), col.names=TRUE, quote=FALSE, row.names=TRUE, sep=",")
    cat(paste("Data saved as: ",output,"dom.csv",sep=""))
  }else{
    write.table(y, file=paste(output,".csv",sep=""), col.names=TRUE, quote=FALSE, row.names=TRUE, sep=",")
    cat(paste("Data saved as: ",output,".csv",sep=""))
  }
}
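## Usage sketch (illustrative only): the toy genotype table and file name
## below are invented, and this assumes converttofrequency() has been sourced.
# Toy diploid file: first row = marker names, first column = individual ids
# geno <- rbind(c("id",   "m1", "m2"),
#               c("ind1", "AA", "AB"),
#               c("ind2", "BB", "AA"))
# tmp <- file.path(tempdir(), "geno.csv")
# write.table(geno, tmp, sep = ",", quote = FALSE,
#             row.names = FALSE, col.names = FALSE)
# converttofrequency(file = tmp, ploidy = 2, format = 3,  # "BB","AB","AA" -> 0, 0.5, 1
#                    output = file.path(tempdir(), "convdata"))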
/scratch/gouwar.j/cran-all/cranData/AGHmatrix/inst/misc/converttofrequency.R
#########################################
#
# Package: AGHmatrix
#
# File: explore_matrix.R
# Contains: explorematrix
#
# Written by Rodrigo Rampazo Amadeu
#
# First version: Feb-2014
# Last update: 14-Apr-2015
# License: GPL-3
#
#########################################

#' Explore a relationship matrix
#'
#' Given a matrix from the 'Amatrix', 'Gmatrix', or 'Hmatrix' functions, returns a list with an exploratory analysis of the matrix.
#'
#' @param data output of the function 'Amatrix', 'Gmatrix', or 'Hmatrix'.
#' @param type "A" or "G" or "H". Default=NULL.
#' @param print if TRUE, prints the analysis. Default=TRUE.
#' @param w proportion of parental gametes IBD due to double reduction. Default=0.
#' @param name csv file name.
#'
#' @return list with an exploratory analysis of the matrix: summary of the off-diagonal, summary of the diagonal, sorted data (top shared genotypes), etc.
#'
#' @examples
#' data(ped.mrode)
#' Amat <- Amatrix(ped.mrode)
#' explorematrix(Amat)
#'
#' @author Rodrigo R Amadeu, \email{rramadeu@@gmail.com}
#'
#' @export

explorematrix <- function(data = NULL,
                          type = NULL,
                          print = TRUE,
                          w = 0,
                          name = NULL){
  cat("Doing the exploratory analysis... \n")
  Time = proc.time()
  A <- data
  double.reduction <- w
  dim <- c(nrow = nrow(A), ncol = ncol(A))
  summary.off.diag <- summary(A[upper.tri(A, diag=FALSE)])
  # sort.data() and sort.diag() are internal helpers expected to be
  # available elsewhere in the package sources
  sort.data <- sort.data(A) #find top shared genotypes in the data
  summary.diag <- summary(diag(A))
  sort.diag <- sort.diag(diag(A)) #find top inbreeding values in the diagonal
  summary.inbreeding <- summary(diag(A) - 1)
  number.markers.used <- if(is.list(data)) data$number.markers.used else NULL
  listA <- list(type = type,
                dim = dim,
                matrix = A,
                summary.off.diag = summary.off.diag,
                sort.data = sort.data,
                summary.diag = summary.diag,
                sort.diag = sort.diag,
                summary.inbreeding = summary.inbreeding,
                class = "explore.gen.data")
  Time = as.matrix(proc.time() - Time)
  cat("Completed! Time =", Time[3]/60, " minutes \n")
  structure(listA, class = "explore.gen.data")
}
/scratch/gouwar.j/cran-all/cranData/AGHmatrix/inst/misc/explorematrix.R
library(AGHmatrix)
Gmatrix  # print the function source

## toy dosage data
inds <- 10
markers <- 100
markersdata <- matrix(sample(x=0:4, size=inds*markers, replace=TRUE),
                      nrow=inds, ncol=markers)
markersdata
G1 <- Gmatrix(markersdata, ploidy=4, method="VanRaden")
G2 <- Gmatrix(markersdata, ploidy=4, method="Endelman")

SNPmatrix <- markersdata
inds <- 3
markers <- 4
markersdata <- matrix(sample(x=0:4, size=inds*markers, replace=TRUE),
                      nrow=inds, ncol=markers)
ploidy <- 4  # autotetraploid dosages
Frequency <- colSums(SNPmatrix)/(nrow(SNPmatrix)*ploidy)
Frequency <- cbind(Frequency, 1-Frequency)
Frequency <- cbind(apply(SNPmatrix, 2, function(x) alelleFreq(x, 0)),
                   apply(SNPmatrix, 2, function(x) alelleFreq(x, 2)))

## scratch numeric checks
Q1 <- matrix(rep(c(9,4,25,16),3), nrow=3, byrow=TRUE)/144*6
Q2 <- matrix(c(0,4,10,4,
               9,0,0,12,
               0,0,15,0), nrow=3, byrow=TRUE)*3/12
Q3 <- matrix(c(0,2,2,0,
               6,0,0,6,
               0,0,6,0), nrow=3, byrow=TRUE)*0.5
Q <- Q1-Q2+Q3
tcrossprod(Q)/drop((6*crossprod(c(3/12,2/12,5/12,4/12)^2, c(1-c(3/12,2/12,5/12,4/12))^2)))

M1 <- matrix(c(0,2,2,1,
               3,0,0,3,
               0,0,3,0), nrow=3, byrow=TRUE)
M2 <- matrix(c(4,2,2,3,
               1,4,4,1,
               4,4,1,4), nrow=3, byrow=TRUE)
Gmatrix(M1, ploidy=4)
Gmatrix(M2, ploidy=4)

## Including data from Endelman 2018
MolPotato <- read.table("~/git/AGHmatrix/data/FileS2.csv", header=TRUE, sep=",")
ind <- MolPotato[[1]]
MolPotato <- as.matrix(MolPotato[,-1])
rownames(MolPotato) <- ind
snp.potato <- MolPotato
save(snp.potato, file="snp.potato.rdata")

library(AGHmatrix)
G1 <- Gmatrix(MolPotato, ploidy=4)
G2 <- Gmatrix(MolPotato, ploidy=4, method="Slater")
G3 <- Gmatrix(MolPotato, ploidy=4, method="Endelman")

PedPotato <- read.table("~/git/AGHmatrix/data/FileS3.csv", header=TRUE, sep=",")
PedPotato[,2] <- PedPotato[PedPotato[,2],1]
PedPotato[which(PedPotato[,3]==0),3] <- NA
PedPotato[which(PedPotato[,4]==0),4] <- NA
PedPotato[,3] <- PedPotato[PedPotato[,3],1]
PedPotato[,4] <- PedPotato[PedPotato[,4],1]
ped.potato <- PedPotato[,c(2,3,4)]
ped.potato$GID <- as.character(ped.potato$GID)
ped.potato$Mother <- as.character(ped.potato$Mother)
ped.potato$Father <- as.character(ped.potato$Father)
ped.potato[which(is.na(ped.potato[,3])),3] <- "0"
ped.potato[which(is.na(ped.potato[,2])),2] <- "0"
A1 <- Amatrix(ped.potato)
names(ped.potato)[1] <- "Ind"
A1 <- Amatrix(ped.potato)
save(ped.potato, file="ped.potato.rdata")

data("snp.potato")
?Gmatrix
Gmatrix.VanRaden <- Gmatrix(snp.potato, method="VanRaden", ploidy=4)
Gmatrix.Endelman <- Gmatrix(snp.potato, method="Endelman", ploidy=4)
Gmatrix.Slater <- Gmatrix(snp.potato, method="Slater", ploidy=4)
Gmatrix.Pseudodiploid <- Gmatrix(snp.potato, method="VanRaden", ploidy=4, pseudo.diploid=TRUE)
Amatrix.potato <- Amatrix(ped.potato, ploidy=4)
/scratch/gouwar.j/cran-all/cranData/AGHmatrix/inst/misc/misc.R
--- title: "AGHmatrix Tutorial" author: "Rodrigo Amadeu" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{AGHmatrix Tutorial} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ```{r knitr_init, echo=FALSE, cache=FALSE} library(knitr) library(rmarkdown) knitr::opts_chunk$set(collapse = TRUE, comment = "#>", fig.width = 7, fig.height = 8, fig.align = "center", dev = "png", dpi = 72, cache = TRUE) ``` ```{r, echo=FALSE, results='hide'} library(AGHmatrix) ``` ## Contact Rodrigo R Amadeu rramadeu at gmail dot com https://rramadeu.github.io ## Overview AGHmatrix software is an R-package to build relationship matrices using pedigree (A matrix) and/or molecular markers (G matrix) with the possibility to build a combined matrix of Pedigree corrected by Molecular (H matrix). The package works with diploid and autopolyploid data. ## Matrices computation implemented in the `AGHmatrix` Currently the package computes the following 17 different relationship matrices: ### Pedigree-based relationship matrix (A matrix) | | Additive | Non-Additive | |-------------------|----------------------------|------------------| | **Diploid** | Henderson (1976) | Cockerham (1954) | | **Autopolyploid** | Kerr (2012), Slater (2013) | | ### Molecular-based relationship matrix (G matrix) | | Additive | Non-Additive | |-------------------|-------------------------------------------|--------------------------------| | **Diploid** | Yang (2010), VanRaden (2012), Liu (2020) | Su (2012), Vitezica (2013) | | **Polyploid** | Slater (2016), de Bem Oliveira (2019) | Slater (2016), Endelman (2018) | ### Combined pedigree and molecular-based relationship matrix (H matrix) | **Any ploidy/effect** | |----------------------------------------------| | Legarra (2009), Munoz (2014), Martini (2018) | Additionally there is a beta implementation to compute A matrix when parentage is not deterministic as in a polycross design. See `?AmatrixPolycross`. ## Citation To cite this R package: Amadeu RR, Garcia AA, Munoz PR, Ferrão LF. AGHmatrix: genetic relationship matrices in R. Bioinformatics. 2023 Jul 1;39(7):btad445. https://doi.org/10.1093/bioinformatics/btad445 ## Installing and loading Within R: ```{r, eval=FALSE} ## Install stable version install.packages("AGHmatrix") ## Install development version install.packages("devtools") devtools::install_github("rramadeu/AGHmatrix") ## Load library(AGHmatrix) ``` ## Relationship matrices using pedigree data - A matrix `Amatrix` process the pedigree and build the A-matrix related to that given pedigree. The matrix is built based in the recursive method presented in Mrode (2014) and described by Henderson (1976). This method is expanded for higher ploidies (n-ploidy) as detailed in Kerr et al. (2012). After loading the package you have to load your data file into the software. To do this, you can use the function `read.data()` or `read.csv()` for example. Your data should be available in R as a `data.frame` structure in the following order: column 1 must be the individual/genotype names (id), columns 2 and 3 must be the parent names. For the algorithm, it does not matter who is the mother and who is the father (so, no sex column). There is a pedigree data example (`ped.mrode`) that can be used to look at the structure and order the data. 
To load `ped.mrode`: ```{r} data(ped.mrode) ped.mrode str(ped.mrode) #check the structure ``` The example `ped.mrode` has 3 columns, column 1 contains the names of the individual/genotypes, column 2 contains the names of the first parent, column 3 contains the names of the second parental (example from Table 2.1 of Mrode 2014). There is no header template, and the unknown value must be equal 0. Your data has to be in the same format of `ped.mrode`. Internally the algorithm first pre-process the pedigree: the individuals are numerated $1$ to $n$. Then, it is verified whether the genotypes in the pedigree are in chronological order (i.e. if the parents of a given individual are located before to this individual in the pedigree data set). If this order is not followed, the algorithm performs the necessary changes to correct them in a iterative way. After this pre-processing, the matrix computation proceeds as in Henderson (1976) for diploid - for additive or dominance relationship - and as in Kerr et al. (2012) for autotetraploids - for additive relationship. For autotetraploids, there is the option to include double-reduction fraction. For diploids there is the option to compute the dominant relationship matrix (Cockerham, 1954). It follows some usage examples with the `ped.mrode`. ```{r, eval=FALSE} #Computing additive relationship matrix for diploids (Henderson 1976): Amatrix(ped.mrode, ploidy=2) #Computing dominant relationship matrix for diploids (Cockerham 1954): Amatrix(ped.mrode, ploidy=2, dominance=TRUE) #Computing additive relationship matrix for autotetraploids (Kerr 2012): Amatrix(ped.mrode, ploidy=4) #Computing additive relationship matrix for autooctaploids (Kerr 2012): Amatrix(ped.mrode, ploidy=8) #Computing additive relationship matrix for autotetraploids # and double-reduction of 0.1 (Kerr 2012): Amatrix(ped.mrode, ploidy=4, w=0.1) #Computing additive relationship matrix for autotetraploids # and double-reduction of 0.1 as in Slater et al. (2014): Amatrix(ped.mrode, ploidy=4, w=0.1, slater = TRUE) #not recommended, but kept in the package to reproduce some former analysis #Computing additive relationship matrix for autohexaploids # and double-reduction of 0.1 (Kerr 2012): Amatrix(ped.mrode, ploidy=6, w=0.1) ``` More information about `Amatrix` can be found with: ```{r, eval=FALSE} ?Amatrix ``` ## Diploid G matrix: relationship matrices using the molecular data `Gmatrix` handles the molecular-marker matrix and builds the relationship matrix. Molecular markers data should be organized in a matrix format (individuals in rows and markers in columns) coded as 0, 1, 2 and missing data value (numeric or `NA`). Import your molecular marker data into `R` with the function `read.table()` or `read.csv()` and convert to a matrix format with the function `as.matrix()`. The function `Gmatrix` can be used to construct the additive relationship either as proposed by Yang et al. (2010) or the proposed by VanRaden (2008). The function can also construct the dominance relationship matrix either as proposed by Su et al. (2012) or as proposed by Vitezica et al. (2013). As an example, here we build the four matrices using real data from Resende et al. (2012). To load `snp.pine` and to check its structure: ```{r} data(snp.pine) snp.pine[1:5,1:5] str(snp.pine) ``` In this dataset, we have 926 individuals with 4853 markers and the missing data value is `-9`. It follows some examples with the `snp.pine` data where the unknown value (`missingValue`) is `-9`. 
Here we set minimum allele frequency to `0.05`, so markers with minor allele frequency lower than 0.05 are removed from the dataset prior to the G matrix construction. ```{r, eval=FALSE} #Computing the additive relationship matrix based on VanRaden 2008 G_VanRadenPine <- Gmatrix(SNPmatrix=snp.pine, missingValue=-9, maf=0.05, method="VanRaden") #Computing the additive relationship matrix based on Yang 2010 G_YangPine <- Gmatrix(SNPmatrix=snp.pine, missingValue=-9, maf=0.05, method="Yang") #Computing the dominance relationship matrix based on Su 2012 G_SuPine <- Gmatrix(SNPmatrix=snp.pine, missingValue=-9, maf=0.05, method="Su") #Computing the dominance relationship matrix based on Vitezica 2013 G_VitezicaPine <- Gmatrix(SNPmatrix=snp.pine, missingValue=-9, maf=0.05, method="Vitezica") ``` More information about `Gmatrix` can be found with: ```{r, eval=FALSE} ?Gmatrix ``` ## Autopolyploid G matrix: relationship matrices using the molecular data Molecular markers data should be organized in a matrix format (individual in rows and markers in columns) coded according to the dosage level: 0, 1, 2, ..., ploidy level, and missing data value (numeric user-defined or `NA`). As an example, an autotetraploid should be coded as 0, 1, 2, 3, 4, and a missing data value. In autopolyploids, the function `Gmatrix` can be used to construct: i) the additive relationship based on VanRaden (2008) and extended by Ashraf (2016); ii) the full-autopolyploid including additive and non-additive model as equations 8 and 9 in Slater et al. (2016); iii) the pseudo-diploid model as equations 5, 6, and 7 Slater et al. (2016). iv) the digenic-dominant model based on Endelman et al. (2018). As an example, here we build the matrices using data from Endelman et al. (2018) (`snp.sol`). There is also an option to build weighted relationship matrices as in Liu et al. (2020). The argument `ploidy.correction` defines the denominator of the formula for the `VanRaden` method. If `ploidy.correction=TRUE`, it uses the parametric correction as $\sum_i p f_i(1-f_i)$, where $p$ is the ploidy level and $f_i$ is the minor allele frequency of the $i_th$ marker. If `ploidy.correction=FALSE`, it uses the sampling variance correction as $\sum_i \frac{1}{p} s^2(m_i)$, where $s^2(m_i)$ is the sampling variance of the $i_th$ marker. Both corrections are equivalent when sampling size goes to the infinity. The default is to use the sampling variance as the correction (i.e., `ploidy.correction=FALSE`). ```{r, eval=FALSE} #Loading the data data(snp.sol) str(snp.sol) #Computing the additive relationship matrix based on VanRaden 2008 # adapted by Ashraf 2016 G_VanRaden <- Gmatrix(snp.sol, method="VanRaden", ploidy=4) #Computing the dominance (digenic) matrix based on Endelman 2018 (Eq. 19) G_Dominance <- Gmatrix(snp.sol, method="Endelman", ploidy=4) #Computing the full-autopolyploid matrix based on Slater 2016 (Eq. 8 #and 9) G_FullAutopolyploid <- Gmatrix(snp.sol, method="Slater", ploidy=4) #Computing the pseudodiploid matrix based on Slater 2016 (Eq. 5, 6, #and 7) G_Pseudodiploid <- Gmatrix(snp.sol, method="VanRaden", ploidy=4, pseudo.diploid=TRUE) #Computing G matrix with specific weight for each marker as # in Liu et al. (2020). 
Gmatrix_weighted <- Gmatrix(snp.sol, method="VanRaden", weights = runif(3895,0.001,0.1), ploidy=4) ``` More information about `Gmatrix` can be found with: ```{r, eval=FALSE} ?Gmatrix ``` ## Ratio (non-dosage) G matrix: relationship matrices using the molecular data without dosage calling Molecular markers data should be organized in a matrix format (individual in rows and markers in columns) coded according to a fraction that represents its molecular information, this can be any number between 0 to 1. Such ratio can represent the count of alternative alleles over the read depth for each individual-marker combination (GBS-like technique). It can be the signal of the alternative allele over the sum of the signals of the alternative and reference alleles (GCMS-like technique). It can also be used for family-pool genotypes. ```{r, eval=FALSE} #Loading the data library(AGHmatrix) data(snp.sol) snp.sol.ratio = snp.sol/4 #transforming it in a ratio of the minor allele frequency Gmatrix <- Gmatrix(snp.sol, method="VanRaden", ploidy=4, ratio=FALSE) Gmatrix.ratio <- Gmatrix(snp.sol.ratio, method="VanRaden", ploidy=4, ratio=TRUE) Gmatrix[1:5,1:5]==Gmatrix.ratio[1:5,1:5] ## it also has the ploidy.correction option Gmatrix.alternative <- Gmatrix(snp.sol, method="VanRaden", ploidy=4, ratio=FALSE, ploidy.correction=TRUE) Gmatrix.ratio.alternative <- Gmatrix(snp.sol.ratio, method="VanRaden", ploidy=4, ratio=TRUE, ploidy.correction=TRUE) Gmatrix[1:5,1:5]==Gmatrix.alternative[1:5,1:5] Gmatrix.alternative[1:5,1:5]==Gmatrix.ratio.alternative[1:5,1:5] ``` ## Combined relationship matrix - H matrix H matrix is the relationship matrix using combined information from the pedigree and genomic relationship matrices. First, you need to compute the matrices separated and then use them as input to build the combined H matrix. Two methods are implemented: `Munoz` shrinks the G matrix towards the A matrix scaling the molecular relatadness by each relationship classes; `Martini` is a modified version from Legarra et al. 2009 where combines A and G matrix using scaling factors. As an example, here we build the matrices using data from Endelman et al. (2018) (`ped.sol` and `snp.sol`). ```{r, eval=FALSE} data(ped.sol) data(snp.sol) #Computing the numerator relationship matrix 10% of double-reduction Amat <- Amatrix(ped.sol, ploidy=4, w = 0.1) Gmat <- Gmatrix(snp.sol, ploidy=4, maf=0.05, method="VanRaden") Gmat <- round(Gmat,3) #see appendix #Computing H matrix (Martini) Hmat_Martini <- Hmatrix(A=Amat, G=Gmat, method="Martini", ploidy=4, missingValue=-9, maf=0.05) #Computing H matrix (Munoz) Hmat_Munoz <- Hmatrix(A=Amat, G=Gmat, markers = snp.sol, ploidy=4, method="Munoz", missingValue=-9, maf=0.05) ``` ## Covariance matrices due to epistatic terms Here we present how to compute the epistasis relationship matrices using Hadamard products (i.e. cell-by-cell product), denoted by `*`. For more information please see Munoz et al. (2014). In this example we are using the molecular-based relationship matrices. 
First, build the additive and dominance matrices:

```{r, eval=FALSE}
data(snp.pine)
A <- Gmatrix(SNPmatrix=snp.pine, method="VanRaden", missingValue=-9, maf=0.05)
D <- Gmatrix(SNPmatrix=snp.pine, method="Vitezica", missingValue=-9, maf=0.05)
```

For the first degree epistatic terms:

```{r, eval=FALSE}
#Additive-by-Additive Interactions
A_A <- A*A
#Dominance-by-Additive Interactions
D_A <- D*A
#Dominance-by-Dominance Interactions
D_D <- D*D
```

For the second degree epistatic terms:

```{r, eval=FALSE}
#Additive-by-Additive-by-Additive Interactions
A_A_A <- A*A*A
#Additive-by-Additive-by-Dominance Interactions
A_A_D <- A*A*D
#Additive-by-Dominance-by-Dominance Interactions
A_D_D <- A*D*D
#Dominance-by-Dominance-by-Dominance Interactions
D_D_D <- D*D*D
```

And so on...

## Exporting your matrix as three columns and sparse format (ASReml - csv format)

That is, the lower-triangular part of the matrix formatted in three columns in .csv format (other ASCII extensions could be used as well). In order to do this, we need to build a matrix, its inverse, and export it using the `formatmatrix` function. ASReml can invert the relationship matrix as well, probably more efficiently than R's `solve()` function for large matrices, so there is no need to invert the matrix in R if the matrix is large. This function has the following options: `round.by`, which lets you decide the number of decimals you want; `exclude.0`, which, if `TRUE`, removes all the zeros from your data (i.e., transforms it into sparse format); and `name`, which defines the name to be used in the exported file. Use the defaults if you are not sure which values to use. Here is an example using the `ped.mrode` data:

```{r, eval=FALSE}
#Loading the data example
data(ped.mrode)

#Computing the matrix
A <- Amatrix(data=ped.mrode, ploidy=4, w=0.1)

#Building its inverse
Ainv <- solve(A)

#Exporting it. The function "formatmatrix"
# will convert it and save in your working directory
formatmatrix(Ainv, round.by=12, exclude.0=TRUE, name="Ainv")
```

## Relationship matrices using pedigree data for polycrosses - A matrix (beta)

Creates an additive relationship matrix A based on a non-deterministic pedigree with 4+ columns where each column represents a possible parent. This function was built with the following designs in mind.

1) A mating design where you have equally probable parents. For example, a generation of insects derived from the mating of three insects in a cage. All the insects in this generation will have the same expected relatedness with all the possible parents (1/3). If there are only two parents in the cage, the function assumes no inbreeding and the pedigree is deterministic (the individual is offspring of the cross between the two parents). Another example is a population of 10 open-pollinated plants where you harvest the seeds without tracking the mother.

2) When `fixedParent` is TRUE: a mating design where you know one parent and might know the other possible parents. For example, a polycross design where you have seeds harvested from a mother plant and possible pollen donors.

The following pedigree has the id of the individual followed by possible parents. The possible parents are filled from left to right in the `pedigree` data frame: id 1,2,3,4 have unknown parents and are assumed unrelated; id 5 has three possible parents (1,2,3); id 6 has three possible parents (2,3,4); id 7 has two parents (deterministic case here, the parents are 3 and 4); id 8 has four possible parents (5,6,7,1).
```{r}
pedigree = data.frame(id=1:8,
                      parent1 = c(0,0,0,0,1,2,3,5),
                      parent2 = c(0,0,0,0,2,3,4,6),
                      parent3 = c(0,0,0,0,3,4,0,7),
                      parent4 = c(0,0,0,0,0,0,0,1),
                      parent5 = 0)
print(pedigree)
AmatrixPolyCross(pedigree)
```

If `fixedParent=TRUE`, the above pedigree will be interpreted with the possible parents filled from left to right after the known parent in the `pedigree` data frame: id 1,2,3,4 have unknown parents and are assumed unrelated; id 5 is offspring of parent 1 in a deterministic way and has two other possible parents (2,3); id 6 is offspring of parent 2 in a deterministic way and has two other possible parents (3,4); id 7 has two parents as before (deterministic case here, the parents are 3 and 4); id 8 is offspring of parent 5 in a deterministic way and has three other possible parents (6,7,1).

```{r}
AmatrixPolyCross(pedigree,fixedParent=TRUE)
```

## Amatrix() benchmark

What follows is a small memory and computational-time profiling for the `Amatrix()` function. The required RAM was computed based on the peak of the process for different pedigree sizes (based on /usr/bin/time -v output). The time profiling was done using an AMD Milan 2.95GHz processor, so the reported times may be underestimates when compared with lower-speed processors. Numerator relationship matrices for pedigrees with less than 20,000 rows can be built with low-spec user-end machines (<8GB RAM) using our package.

```{r, eval=TRUE,echo=FALSE}
x = c(1000,5000,10000,20000,30000,40000,50000,60000,70000,80000,90000,100000)/1000 #Pedigree Size
y = c(252156,622500,1795260,6481064,14313448,25227680,49081224,70622336,96017144,125320048,158444856,194731908)/1e+6 #RAM GB
ytime = c(0.0025, 0.080, 0.2, 0.89, 1.62,3.01,4.52,7.12,9.15,13.13,15.13,20) #minutes
df = data.frame(size=x,ram=y,time=ytime)
plot(x=df$size,y=df$ram, ylab = "RAM (GB) at the peak of Amatrix() function", xlab = "Pedigree size (in 1,000 rows)", type="b", axes=FALSE)
axis(side = 2, at = c(0,4,8,16,32,48,64,96,144,192),cex.axis=.75)
axis(side = 1, at = c(1,5,10,20,30,40,50,60,70,80,90,100),cex.axis=.75)
plot(x=df$size,y=df$time, type="b", ylab = "Time to run (minutes) the Amatrix() function", xlab = "Pedigree size (in 1,000 rows)", axes=FALSE)
axis(side = 2, at = seq(0,20,2),cex.axis=.75)
axis(side = 1, at = c(1,5,10,20,30,40,50,60,70,80,90,100),cex.axis=.75)
```

## Bibliography

Amadeu, RR, et al. 2023 AGHmatrix: genetic relationship matrices in R. Bioinformatics 39, 7.

Amadeu, RR, et al. 2016 AGHmatrix: R package to construct relationship matrices for autotetraploid and diploid species: a blueberry example. The Plant Genome 9, 4.

Ashraf, BH, et al. 2016 Estimating genomic heritabilities at the level of family-pool samples of perennial ryegrass using genotyping-by-sequencing. Theoretical and Applied Genetics 129, 45-52.

Cockerham, CC. 1954 An extension of the concept of partitioning hereditary variance for analysis of covariances among relatives when epistasis is present. Genetics 39, 859–882.

de Bem Oliveira, I, et al. 2019 Genomic prediction of autotetraploids; influence of relationship matrices, allele dosage, and continuous genotyping calls in phenotype prediction. G3: Genes, Genomes, Genetics 9(4), 1189-1198.

Endelman, JB, et al. 2018 Genetic variance partitioning and genome-wide prediction with allele dosage information in autotetraploid potato. Genetics 209, 77-87.

Hamilton, MG, et al. 2017 Computation of the inverse additive relationship matrix for autopolyploid and multiple-ploidy populations. Theoretical and Applied Genetics 131, 851-890.

Henderson, C.
1976 A simple method for computing the inverse of a numerator relationship matrix used in prediction of breeding values. Biometrics 32, 69–83.

Kerr, RJ, et al. 2012 Use of the numerator relationship matrix in genetic analysis of autopolyploid species. Theoretical and Applied Genetics 124, 1271–1282.

Legarra, A, et al. 2009 A relationship matrix including full pedigree and genomic information. Journal of Dairy Science 92, 4656–4663.

Liu, A, et al. 2020 Weighted single-step genomic best linear unbiased prediction integrating variants selected from sequencing data by association and bioinformatics analyses. Genetics Selection Evolution 52, 48.

Martini, JW, et al. 2018 The effect of the H$^{-1}$ scaling factors $\tau$ and $\omega$ on the structure of H in the single-step procedure. Genetics Selection Evolution 50(1), 16.

Mrode, RA. 2014 *Linear models for the prediction of animal breeding values*. CABI, 3rd ed.

Munoz, PR, et al. 2014 Unraveling additive from nonadditive effects using genomic relationship matrices. Genetics 198, 1759–1768.

R Core Team, 2016 *R*: A Language and Environment for Statistical Computing. R Foundation for Statistical Computing, Vienna, Austria.

Resende, MF, et al. 2012 Accuracy of genomic selection methods in a standard data set of loblolly pine (*Pinus taeda* L.). Genetics 190, 1503–1510.

Slater, AT, et al. 2014 Improving the analysis of low heritability complex traits for enhanced genetic gain in potato. Theoretical and Applied Genetics 127, 809–820.

Slater, AT, et al. 2016 Improving genetic gain with genomic selection in autotetraploid potato. The Plant Genome 9.

Su, G, et al. 2012 Estimating additive and non-additive genetic variances and predicting genetic merits using genome-wide dense single nucleotide polymorphism markers. PLoS ONE 7, e45293. https://doi.org/10.1371/journal.pone.0045293

VanRaden, P. 2008 Efficient methods to compute genomic predictions. Journal of Dairy Science 91, 4414–4423.

Vitezica, ZG, et al. 2013 On the additive and dominant variance and covariance of individuals within the genomic selection scope. Genetics 195, 1223–1230.

Yang, J, et al. 2010 Common SNPs explain a large proportion of the heritability for human height. Nature Genetics 42, 565–569.

```{r,eval=FALSE,echo=FALSE}
#To extract the R code from this vignette into an .R file
knitr::purl("vignettes/Tutorial_AGHmatrix.Rmd")
```

```{r}
sessionInfo()
```
/scratch/gouwar.j/cran-all/cranData/AGHmatrix/vignettes/Tutorial_AGHmatrix.Rmd
includeLag <- function(dataset,VAR,var.agg,lags=1){
  dataset0 <- dataset
  dataset0[['Anno']] <- dataset[['Anno']] + lags
  dataset <- plyr::join(dataset,dataset0[,c('Anno',var.agg,VAR)], by=c('Anno',var.agg),type='left',match='first')
  names(dataset)[ncol(dataset)] <- paste(VAR,paste('_',lags,sep=''),sep='')
  return(dataset)
}

fdiff <- function(dataset,VAR,var.agg){
  datasetlag <- subset(dataset[,c(VAR,var.agg)],dataset$Anno<max(dataset$Anno))
  datasetlag$Anno <- datasetlag$Anno+1
  dataset <- plyr::join(dataset,datasetlag,by=var.agg)
  VARlag <- paste(VAR,'_1',sep='')
  names(dataset)[ncol(dataset)]=VARlag
  dataset <- subset(dataset,dataset$Anno > min(dataset$Anno))
  VARdiff <- paste('diff_',VAR,sep='')
  dataset[[VARdiff]] <- dataset[[VAR]] - dataset[[VARlag]]
  return(dataset[,c(var.agg,VAR,VARlag,VARdiff)])
}

#' BCML estimator
#'
#' This function estimates a space-time linear model according to the specified formula. It implements the BCML (or BCLSDV) estimator as in Elhorst (2010) \doi{10.1016/j.regsciurbeco.2010.03.003}.
#'
#' @param dataset STFDF with the data
#' @param yearStart First year considered in the estimation
#' @param yearEnd Last year considered in the estimation
#' @param var.agg Index of the spatial units
#' @param eq Formula to be estimated. It excludes the spatial lag
#' @param spatial Radius to define neighbors
#' @param estimation Either 'analytical' or 'numerical'. If 'analytical' is specified, the concentrated maximum likelihood is estimated and then the other parameters are obtained analytically. Otherwise, all parameters are obtained through numerical maximization of the log-likelihood function.
#' @param corrBIAS Boolean. If TRUE, the bias correction is applied.
#' @param WMAT The spatial weight matrix
#'
#' @return A list with two objects. The first object is the estimates table.
The second object is the log-likelihood evaluated at its maximum #' @examples #' \donttest{ #' library(maxLik) #' library(matrixcalc) #' #' set.seed(123) #' sd = sim_data_fe(dataset=regsamp,N=100,TT=8, #' spatial = 80,Tau = -0.2,Rho = 0.4, #' Beta = 2,sdDev = 2,startingT = 10, #' LONGLAT = TRUE) #' est_bcml = bcml(dataset = sd[[1]],yearStart = 3,yearEnd = 9, #' var.agg = 'Cod_Provincia',eq = Y~X1, #' estimation = 'analytical',corrBIAS = TRUE,WMAT = sd[[2]]) #' est_bcml #' } #' #' @export bcml <- function( dataset, yearStart, yearEnd, var.agg='Cod_Provincia', eq, spatial = NULL, estimation='analytical', corrBIAS=TRUE, WMAT = NULL){ if(class(dataset) %in% c('sp','STFDF')){ dataset0 <- dataset@data }else{ dataset0 <- dataset } Ninit <- length(levels(as.factor(dataset0[[var.agg]]))) Tinit <- nrow(dataset0)/Ninit N <- length(levels(as.factor(dataset0[[var.agg]]))) TT <- nrow(dataset0)/N iota <- rep(1,TT) Q <- kronecker(diag(TT) - (1/TT)*iota%*%t(iota),diag(N)) # this matrix is if(is.null(WMAT)){ if(is.numeric(spatial) & !(spatial %in% c(0,'continuous','queen'))){ dmax=spatial textDist = paste('dist',paste(dmax,'km')) dataset = sp::spTransform(dataset,sp::CRS("+proj=longlat")) netw2 = spdep::dnearneigh(sp::coordinates(dataset@sp),d1=0,d2=dmax,longlat=FALSE) matw0 = spdep::nb2mat(netw2,zero.policy=TRUE) } }else{ matw0=WMAT } dataset0 <- dataset0[order(dataset0$Anno, dataset0[[var.agg]], decreasing=FALSE),] Yvar <- as.character(eq[[2]]) sYvar <- paste('spat.',Yvar,sep='') dataset0[[sYvar]] = kronecker(diag(TT),matw0)%*%as.matrix(dataset0[,Yvar]) Xvars <- strsplit(as.character(eq)[[3]],split='\\+')[[1]] Xvars <- gsub("\n ","",Xvars) Xvars <- gsub(" ","",Xvars) EQvars <- c(Yvar,sYvar,Xvars) for(j in EQvars){ dataset0[[paste('dm.',j,sep='')]] = Q %*% dataset0[,j] } EQvarsQ <- paste('dm.',EQvars,sep='') EQvarsALL <- c(EQvars,EQvarsQ) for(i in EQvarsALL){ dataset0 <- includeLag(dataset=dataset0, VAR=i, var.agg=var.agg) } EQvarsALL1 <- paste(EQvarsALL,'_1',sep='') dataset0 <- subset(dataset0,dataset0$Anno>=yearStart & dataset0$Anno<=yearEnd) N <- length(levels(as.factor(dataset0[[var.agg]]))) TT <- nrow(dataset0)/N iota <- rep(1,TT) Q <- kronecker(diag(TT) - (1/TT)*iota%*%t(iota),diag(N)) # this matrix is Xstar <- as.matrix(dataset0[,paste('dm.',Xvars,sep='')]) colnames(Xstar) <- paste('dm.',Xvars,sep='') Y_1star <- Q%*%as.matrix(dataset0[,paste(Yvar,'_1',sep='')]) X_1star <- Q%*%as.matrix(dataset0[,paste(Xvars,'_1',sep='')]) colnames(Y_1star) = paste('dm.',paste(Yvar,'_1',sep=''),sep='') colnames(X_1star) = paste('dm.',paste(Xvars,'_1',sep=''),sep='') XXstar <- as.matrix(cbind(Y_1star,Xstar)) XX_1star <- as.matrix(cbind(Y_1star,X_1star)) colnames(XXstar) <- c(colnames(Y_1star),colnames(Xstar)) colnames(XX_1star) <- c(colnames(Y_1star),colnames(X_1star)) Ystar0 <- as.matrix(dataset0[,paste('dm.',Yvar,sep='')]) colnames(Ystar0) <- paste('dm.',Yvar,sep='') Ystar1 <- as.matrix(dataset0[,paste('dm.spat.',Yvar,sep='')]) colnames(Ystar1) <- paste('dm.spat.',Yvar,sep='') par0 <- solve(t(XXstar)%*%XXstar)%*%t(XXstar)%*%Ystar0 names(par0) <- rownames(par0) mod0 <- XXstar%*%par0 e0 <- Ystar0-mod0 par1 <- solve(t(XXstar)%*%XXstar)%*%t(XXstar)%*%Ystar1 names(par1) <- rownames(par1) mod1 = XXstar%*%par1 e1 <- Ystar1-mod1 llrho <- function(rho){ ll <- -((N*TT)/2)*log(t(e0 - rho*e1)%*%(e0-rho*e1)) + TT*log(det(diag(N)-rho*matw0)) return(ll) } LL <- function(pa){ -((N*TT)/2)*log(2*pi*pa[1]) + TT * log(det(diag(N) - pa[2]*matw0)) - (1/(2*pa[1]))* t(Ystar0 - pa[3]*Y_1star - pa[2]*Ystar1- 
Xstar%*%as.matrix(pa[-c(1:3)]))%*% (Ystar0 - pa[3]*Y_1star - pa[2]*Ystar1- Xstar%*%as.matrix(pa[-c(1:3)])) } if(estimation == 'numerical'){ mle <- maxLik::maxLik(LL, start=c(stats::runif(1,0,10), stats::runif(1,-1,1), stats::runif(1,-1,1), stats::rnorm(ncol(Xstar))), method='BFGS',hess=NULL) PA <- stats::coef(mle) Parms <- c(PA[3],PA[2],PA[-c(1:3)],PA[1]) names(Parms)[1:2] <- c(colnames(Y_1star),colnames(Ystar1)) names(Parms)[-c(1,2,length(Parms))] <- colnames(Xstar) names(Parms)[length(Parms)] <- 'sigma2' rho <- Parms[2] Tau <- Parms[1] sigma2 <- Parms[length(Parms)] H <- mle$hessian Vars <- -H Vars0 <- solve(-H) maxLL <- mle[1] }else{ A <- matrix(c(1,-1),nrow=2,ncol=1) B <- matrix(c(2,1),nrow=2,ncol=1) startingP <- stats::runif(n=1,min=-1,max=1) estRho <- maxLik::maxLik(llrho,start=startingP, constraints=list(ineqA=A,ineqB=B), method='BFGS',hess=NULL) rho <- stats::coef(estRho) maxLL <- estRho[1] if(length(rho)==0){ rho <- 0 maxLL <- NA } beta0 <- par0 beta1 <- par1 betas <- beta0 - rho*beta1 rownames(betas)[1] <- 'Y_1' names(betas) <- rownames(betas) parms <- solve(t(XXstar)%*%XXstar)%*%t(XXstar)%*% (Ystar0-rho*kronecker(diag(TT),matw0)%*%Ystar0) rownames(parms)[1] <- 'Y_1' sigma2 <- as.numeric((1/(N*TT))*t(e0-rho*e1)%*%(e0-rho*e1)) Parms <- c(parms[1],rho,parms[-1],sigma2) names(betas) <- gsub('XXstar','',names(betas)) names(Parms) <- c(names(betas)[1],'rho',names(betas)[-1],'sigma2') Y_1stara <- (Y_1star-mean(Y_1star))/stats::sd(Y_1star) ADJ_MAT <- kronecker(diag(TT),matw0) Wtilde <- matw0%*%solve(diag(N)-rho*matw0) var1 <- (1/sigma2)*t(Y_1star)%*%(Y_1star) var2 <- (1/sigma2)*t(Y_1star)%*%kronecker(diag(TT),Wtilde)%*% Y_1star*betas[1] var3 <- TT*matrixcalc::matrix.trace(Wtilde%*%Wtilde + t(Wtilde)%*%Wtilde) + (1/sigma2)*t(parms)%*%t(XXstar)%*% (kronecker(diag(TT),t(Wtilde)%*%Wtilde))%*%XXstar%*%parms var4 = (1/sigma2)*t(Xstar)%*%Y_1star var5 <- (1/sigma2)*t(Xstar)%*%kronecker(diag(TT),Wtilde)%*% Xstar%*%parms[-1] var6 <- (1/sigma2)*t(Xstar)%*%Xstar var7 <- (TT/sigma2)*matrixcalc::matrix.trace(Wtilde) var8 <- (N*TT)/2*((sigma2)^2) Vars <- matrix(0,nrow=length(betas)+2,ncol=length(betas)+2) Vars[1,1] <- var1 Vars[2,1] <- var2; Vars[1,2] = var2 Vars[2,2] <- var3 Vars[3:(3+(length(parms[-1])-1)),1] <- var4 Vars[1,3:(3+(length(parms[-1])-1))] <- var4 Vars[3:(3+(length(parms[-1])-1)),2] <- var5 Vars[2,3:(3+(length(parms[-1])-1))] <- var5 Vars[3:(3+(length(parms[-1])-1)), 3:(3+(length(parms[-1])-1))] <- var6 Vars[nrow(Vars),2] <- var7 Vars[2,nrow(Vars)] <- var7 Vars[nrow(Vars),nrow(Vars)] <- var8 Vars0 <- solve(Vars) Tau <- Parms[1] } std.err <- sqrt(diag(Vars0)) Bias <- matrix(0,nrow=length(Parms),ncol=1) Bias[1] <- (1/N)*matrixcalc::matrix.trace(solve((1-Tau)*diag(N)-rho*matw0)) Bias[2] <- (1/N)*matrixcalc::matrix.trace(matw0%*%(diag(N)-rho*matw0)%*% solve((1-Tau)*diag(N)-rho*matw0))+ (1/N)*matrixcalc::matrix.trace(matw0%*%(diag(N)-rho*matw0)) Bias[nrow(Bias)] <- 1/(2*sigma2) if(corrBIAS==TRUE){ BCLSDV <- Parms - solve((1/(N*TT))*(-Vars))%*%((1/TT)*(Bias)) }else{ BCLSDV <- Parms } tab <- data.frame( variable = as.character(names(Parms)), estimate = round(BCLSDV,6), std.err = round(std.err,6), t.value = round(BCLSDV/std.err,6)) tab$p.value <- round(2*stats::pt(-abs(tab$t.value),df=N*TT-nrow(tab)),6) rownames(tab) <- 1:nrow(tab) tab$variable <- as.character(tab$variable) tab$variable[1] <- paste('dm.',paste(Yvar,'_1',sep=''),sep='') tab$variable[2] <- paste('dm.spat.',Yvar,sep='') tab$signif <- '' tab$signif[tab$p.value < 0.1] = '.' 
tab$signif[tab$p.value < 0.05] = '*'
tab$signif[tab$p.value < 0.01] = '**'
tab$signif[tab$p.value < 0.001] = '***'
tab2 <- data.frame( variable = names(Parms), estimate = round(Parms,6), std.err = round(std.err,6), t.value = round(Parms/std.err,6))
tab2$p.value <- round(2*stats::pt(-abs(tab2$t.value),df=N*TT-nrow(tab2)),6)
rownames(tab2) <- 1:nrow(tab2)
yhat <- as.matrix(dataset0[,as.character( tab$variable[-length(tab$variable)])])%*% as.matrix(tab[-nrow(tab),2])
uhat <- dataset0[[Yvar]] - yhat
aic <- 2*(nrow(tab)-1)-2*as.numeric(maxLL)
rm(tab2,Bias,dataset0,matw0,uhat,yhat,aic)
out <- list(tab,maxLL)
names(out) <- c('estimates','maxll')
return(out)
}

#' Space-time Bayesian INLA estimator
#'
#' This function estimates a space-time linear model using Bayesian INLA. It is a wrapper of the INLA::inla function (Lindgren and Rue (2015) \doi{10.18637/jss.v063.i19}; Bivand, Gomez-Rubio and Rue (2015) \doi{10.18637/jss.v063.i20}) adapted to panel data.
#'
#'
#' @param formula Formula of the model to be estimated
#' @param d Data frame
#' @param W Spatial matrix
#' @param RHO Parameter of spatial dependence
#' @param PHI Parameter of temporal dependence
#' @param var.agg Indexes of the panel dimensions. The first argument is the spatial dimension, the second argument is the temporal dimension.
#' @param normalization Boolean. If TRUE, the data are normalized before estimation
#' @param improve Please refer to the documentation of the INLA package
#' @param fhyper Please refer to the documentation of the INLA package
#' @param probit Please refer to the documentation of the INLA package
#' @param ... additional parameters. Please refer to the documentation of the INLA package
#'
#'
#' @return Returns a model of class "inla". Please refer to the documentation of the INLA package for additional information
#' @examples
#' \donttest{
#'
#' set.seed(123)
#' sd = sim_data_fe(dataset=regsamp,N=100,TT=8,spatial = 80,
#' Tau = -0.2,Rho = 0.4, Beta = 2,sdDev = 2,
#' startingT = 10,LONGLAT = TRUE)
#' est_inla = inla.st(formula = Y~-1+X1,d = sd[[1]]@data,
#' W = sd[[2]],PHI=-0.2,RHO=0.4,
#' var.agg=c('Cod_Provincia','Anno'),
#' family='gaussian',
#' improve=TRUE,
#' normalization=FALSE,
#' control.family = list(hyper = list(prec=list(initial=25,fixed=TRUE))),
#' control.predictor = list(compute = TRUE),
#' control.compute = list(dic = TRUE, cpo = TRUE),
#' control.inla = list(print.joint.hyper = TRUE))
#' summary(est_inla)
#' }
#'
#' @export
inla.st = function (formula, d, W, RHO,PHI, var.agg, normalization=FALSE, improve = TRUE, fhyper = NULL, probit = FALSE, ...)
{ if(normalization == TRUE){ novaragg <- stats::model.matrix(formula,d) did <- d[,var.agg] ydata <- as.matrix(d[,as.character(formula)[2]]) colnames(ydata) <- as.character(formula)[2] novaragg <- cbind(ydata,novaragg) d <- apply(novaragg,2,function(x) (x-mean(x))/stats::sd(x)) d <- cbind(did,d) } d <- d[order(d[[var.agg[2]]],d[[var.agg[1]]],decreasing=FALSE),] d$idx <- 1:nrow(d) N <- length(levels(as.factor(d[[var.agg[1]]]))) TT <- nrow(d)/N iota <- rep(1,TT) Q <- kronecker(diag(TT) - (1/TT)*iota%*%t(iota),diag(N)) mmatrix0 <- stats::model.matrix(formula, d) mmatrix <- mmatrix0 IrhoWst <- effectsST(dataset=d, var.agg=var.agg[1], W=W, Tau=c(as.numeric(PHI)), Rho=c(as.numeric(RHO)), periods=TT) IrhoWst <- methods::as(IrhoWst, "sparseMatrix") invIrhoWst <- solve(IrhoWst) IrhoW2 <- Matrix::crossprod(IrhoWst) assign("IrhoW2", IrhoW2, environment(formula)) mm <- as.data.frame(as.matrix(invIrhoWst %*% mmatrix)) mmSTAR = as.data.frame(Q %*% as.matrix(mm)) names(mmSTAR) <- paste("x", 1:ncol(mmSTAR), sep = "") xnam <- names(mmSTAR) d2 <- cbind(d, mmSTAR) fmla <- paste(as.character(formula)[2], "~ -1+", paste(xnam, collapse = "+")) if (is.null(fhyper)){ fmla <- paste(fmla, "+f(idx, model=\"generic0\", Cmatrix=IrhoW2)", sep = "") }else{ fmla <- paste(fmla, "+f(idx, model=\"generic0\", Cmatrix=IrhoW2, hyper=fhyper)", sep = "") } fmla <- stats::as.formula(fmla) res <- INLA::inla(fmla, data = d2, ...) if (improve){ res <- INLA::inla.rerun(res) } res$logdet <- as.numeric(Matrix::determinant(IrhoW2)$modulus) res$mlik <- res$mlik + res$logdet/2 res$impacts <- FALSE return(res) } getYtilde <- function(dataset,yvar,xvar,wmat,var.agg,Rho,Tau){ var.agg0 <- 'Anno' yvar1 <- paste(yvar,'_1',sep='') datalag <- fdiff(dataset,VAR=yvar,var.agg=var.agg)[,c(1,2,4)] dataset0 <- plyr::join(dataset,datalag,type='right') N <- nrow(wmat) TT <- nrow(dataset)/N datalagsX <- list() Anno1 <- min(dataset$Anno) AnnoT <- max(dataset$Anno) set_Annos <- Anno1:AnnoT dataX <- subset(dataset,dataset$Anno == set_Annos[1]) xs <- xvar for(i in 2:length(set_Annos)){ temp <- subset(dataset,dataset$Anno == set_Annos[i]) names(temp)[which(!names(temp) %in% var.agg)] <- paste(names(temp)[which(!names(temp) %in% var.agg)],i-1,sep='_') temp[[var.agg0]] <- Anno1 dataX <- plyr::join(dataX,temp,by=var.agg) xs <- c(xs,paste(xvar,i-1,sep='_')) } dataX0 <- cbind(1,dataX[,xs]) names(dataX0)[1] <- 'const' nAlphas <- ncol(dataX0) err_init <- (diag(N)-Rho*wmat)%*%as.matrix(dataX[,yvar]) split_data <- split(dataset0,f=datalag[[var.agg0]]) N <- nrow(split_data[[1]]) err_data <- lapply(split_data, function(w) (diag(N)-Rho*wmat)%*%as.matrix(w[,yvar]) - Tau * as.matrix(w[,yvar1]) ) err_tab <- do.call('rbind',err_data) err_tab1 <- rbind(err_init,err_tab) return(err_tab1) } getXtildeM <- function(dataset,yvar,xvarm,xvar,wmat,var.agg,Rho){ var.agg0 <- 'Anno' yvar1 <- paste(yvar,'_1',sep='') N <- nrow(wmat) TT <- nrow(dataset)/N+1 Anno1 <- min(dataset$Anno) AnnoT <- max(dataset$Anno) set_Annos <- Anno1:AnnoT dataX <- subset(dataset,dataset$Anno == set_Annos[1]) dataX <- dataX[,names(dataX) %in% xvar] Xtilde <- matrix(0,nrow=(nrow(dataX)*(TT-1)), ncol=1+((TT-1)*length(xvarm)+length(xvar))) posI <- 1:N posX2 <- 1:ncol(Xtilde) posX3 <- posX2[!posX2 %in% c(1:(1+(TT-1)*length(xvarm)))] Xtilde[posI,1] <- 1 Xtilde[posI,(1+1):(1+length(xvar))] <- as.matrix(dataX[,which(!names(dataX) %in% c(var.agg,yvar))]) for(i in 2:(length(set_Annos))){ temp0 <- subset(dataset,dataset$Anno == set_Annos[i]) temp <- temp0[,names(temp0) %in% xvar] temp00 <- temp0[,names(temp0) %in% xvarm] 
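# (Added note, inferred from this construction: Xtilde stacks one N-row block
# per differenced period. The first N rows form the initial-observation
# equation and collect every period's regressors in the leading 'alpha'
# columns (filled via posI), while the block of period i,
# rows ((i-1)*N+1):(i*N), carries only period-i regressors in the trailing
# 'beta' columns indexed by posX3.)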
if(length(posX3)>1){ temp1 <- as.matrix(temp[,which(!names(temp) %in% c(var.agg,yvar))]) temp10 <- as.matrix(temp00[,which(!names(temp00) %in% c(var.agg,yvar))]) lx <- 1+length(xvarm)*(i-1)+1 lx2 <- lx+length(xvarm)-1 }else{ temp1 <- temp[,which(!names(temp) %in% c(var.agg,yvar))] lx <- 1+length(xvarm)*i lx2 <- lx } Xtilde[posI,lx:lx2] <- temp10 Xtilde[((i-1)*N+1):(i*N),posX3] <- temp1 } corX <- stats::cor(Xtilde) dupX <- which(duplicated(corX[,1])) if(length(dupX)>0){ Xtilde2 <- Xtilde[,-dupX] }else{ Xtilde2 <- Xtilde } return(Xtilde2) } uml_err_fullM <- function(dataset,yvar,xvarm,xvar,wmat,var.agg,Rho,Tau,Beta,Alphas){ var.agg0 <- 'Anno' yvar1 <- paste(yvar,'_1',sep='') N <- nrow(wmat) datalag <- fdiff(dataset,VAR=yvar,var.agg=var.agg)[,c(1,2,4)] dataset0 <- plyr::join(dataset,datalag,type='right') datalagsX <- list() Anno1 <- min(dataset$Anno) AnnoT <- max(dataset$Anno) set_Annos <- Anno1:AnnoT dataX <- subset(dataset,dataset$Anno == set_Annos[1]) dataX <- dataX[,names(dataX) %in% c(xvar,var.agg,yvar)] xs <- xvar for(i in 2:length(set_Annos)){ temp <- subset(dataset,dataset$Anno == set_Annos[i]) names(temp)[which(!names(temp) %in% var.agg)] <- paste(names(temp)[which(!names(temp) %in% var.agg)],i-1,sep='_') temp[[var.agg0]] <- Anno1 dataX <- plyr::join(dataX,temp,by=var.agg) xs <- c(xs,paste(xvar,i-1,sep='_')) } less2 <- c(grep('_t1',xs),grep('_lag',xs)) if(length(less2) > 0){ xs <- xs[-less2] }else{ xs <- xs } dataX0 <- cbind(1,dataX[,xs]) names(dataX0)[1] <- 'const' nAlphas <- ncol(dataX0) err_init <- (diag(N)-Rho*wmat)%*%as.matrix(dataX[,yvar]) - as.matrix(dataX0)%*%(Alphas) split_data <- split(dataset0,f=datalag[[var.agg0]]) N <- nrow(split_data[[1]]) err_data <- lapply(split_data, function(w) (diag(N)-Rho*wmat)%*%as.matrix(w[,yvar]) - Tau * as.matrix(w[,yvar1]) - as.matrix(w[,xvar]) %*% as.matrix(Beta)) err_tab <- do.call('rbind',err_data) err_tab1 <- rbind(err_init,err_tab) return(err_tab1) } getPI <- function(wmat,Tau,Rho){ N <- nrow(wmat) PI <- Tau*solve(diag(N) - Rho*wmat) return(PI) } getV <- function(PI,m){ N <- nrow(PI) PIm <- PI for(i in 1:(2*m-1)){ PIm <- PIm %*%PI } V <- 2*solve(diag(N)+PI)%*%(diag(N) + PIm) return(V) } getVtheta = function(Sigma2,Sigma2xi,V){ N <- nrow(V) theta <- (Sigma2/Sigma2xi) Vtheta <- (theta*diag(N)+V) return(Vtheta) } getHv <- function(Vtheta,N,TT){ Hv <- diag(N*TT)*2 for(i in 1:(TT-1)){ Hv[(1:N)+N*i,(1:N)+N*(i-1)] <- -diag(N) Hv[(1:N)+N*(i-1),(1:N)+N*i] <- -diag(N) } Hv[1:N,1:N] <- Vtheta return(Hv) } uml_data <- function(dataset,ff,var.agg){ y <- gsub('//(//)','',ff[2]) x0 <- gsub('//(//)','',ff[3]) x0 <- gsub(' ','',x0) x <- strsplit(x0,split='\\+')[[1]] z <- c(y,x) fdata <- fdiff(dataset,VAR=z[1],var.agg=var.agg)[,c(1,2,5)] for(i in 2:length(z)){ fdata <- plyr::join(fdata,fdiff(dataset,VAR=z[i],var.agg=var.agg)[,c(1,2,5)]) } return(fdata) } effectsST <- function(dataset, var.agg, W,Tau, Rho, periods=NULL){ N <- length(levels(as.factor(dataset[[var.agg]]))) if(is.null(periods)){ TT <- length(min(dataset$Anno):max(dataset$Anno)) }else{ TT <- periods } C <- -(diag(N)*Tau) B <- (diag(N) - Rho*W) EFF <- matrix(0,ncol=N*TT,nrow=N*TT) seqB1 <- list() seqB2 <- list() seqC1 <- list() seqC2 <- list() for(i in 1:TT){ seqB1[[i]] <- (1+(N*(i-1))) seqB2[[i]] <- N*i seqC1[[i]] <- (1+(N*(i))) seqC2[[i]] <- N*(i+1) } for(j in 1:(TT)){ EFF[seqB1[[j]]:seqB2[[j]], seqB1[[j]]:seqB2[[j]]] <- B } for(k in 1:(TT-1)){ EFF[seqC1[[k]]:seqC2[[k]], seqB1[[k]]:seqB2[[k]]] <- C } return(EFF) } #' MML estimator #' #' This function estimates a space time linear model 
according to the specified formula using the ML estimator as in Elhorst (2010) \doi{10.1016/j.regsciurbeco.2010.03.003}. The estimator maximizes the full log-likelihood function in which the parameter of spatial dependence is constrained. #' #' @param Rho the constrained parameter of spatial dependence #' @param ff Formula of the linear model. It excludes the spatial lag #' @param dataset Data frame with the data #' @param wmat Spatial weight matrix #' @param var.agg Spatial index of the data frame #' @param m How many time periods have passed since the beginning of the space-time process #' #' @return The estimates tables #' @examples #' \donttest{ #' #' set.seed(123) #' sd = sim_data_fe(dataset=regsamp,N=50,TT=8, #' spatial = 80,Tau = -0.2,Rho = 0.4, #' Beta = 2,sdDev = 2,startingT = 10, #' LONGLAT = TRUE);sd[[1]]$X2 = stats::rnorm(nrow(sd[[1]]@data)) #' est_mml = mml(dataset = sd[[1]]@data,Rho = 0.4, #' ff = Y~X1+X2, #' wmat = sd[[2]],var.agg = c('Anno','Cod_Provincia'), #' m = 10) #' est_mml #' } #' #' @export mml = function(Rho, ff,dataset,wmat,var.agg,m=10){ dataset0 <- uml_data( dataset=dataset, ff=ff, var.agg=var.agg) N <- nrow(wmat) TT <- nrow(dataset)/N yvar <- gsub('//(//)','',ff[2]) x0 <- gsub('//(//)','',ff[3]) x0 <- gsub(' ','',x0) xvar <- strsplit(x0,split='\\+')[[1]] yvar0 <- paste('diff_',yvar,sep='') yvar1 <- paste(yvar0,'_1',sep='') xvar0 <- paste('diff_',xvar,sep='') less1 <- c(grep('_t1',xvar0),grep('_lag',xvar0)) if(length(less1)>0){ xvar0m <- xvar0[-less1] }else{ xvar0m <- xvar0 } LL <- function(pa){ TAU = pa[1] SIGMA2 = pa[2] SIGMA2XI = pa[3] ytilde <- getYtilde(dataset=dataset0, yvar=yvar0, xvar=xvar0, wmat=wmat, var.agg=var.agg, Rho=Rho, Tau=TAU) xtilde <- getXtildeM(dataset=dataset0, yvar=yvar0, xvarm=xvar0m, xvar=xvar0, wmat=wmat, var.agg=var.agg, Rho=Rho) PI <- getPI(wmat=wmat,Tau=TAU,Rho=Rho) V <- getV(PI=PI,m=m) Vtheta <- getVtheta(Sigma2=SIGMA2,Sigma2xi=SIGMA2XI,V=V) Hv <- getHv(Vtheta=Vtheta,N=N,TT=TT-1) rm(BETAS) rm(coeff2) invHv <- solve(Hv) coeff2 <- try(solve(t(xtilde)%*%invHv%*%xtilde)%*% t(xtilde)%*%invHv%*%ytilde) whichNO <- which(apply(xtilde,2,function(x) sum(x) ==0)) names(coeff2)[1:length(coeff2)] <- 'beta' names(coeff2)[1:(length(coeff2)-length(xvar))] <- 'alpha' alphas2 <- coeff2[which(names(coeff2) == 'alpha')] betas2 <- coeff2[which(names(coeff2) == 'beta')] derr_full <- uml_err_fullM(dataset=dataset0, yvar= yvar0, xvarm = xvar0m, xvar=xvar0, wmat=wmat, var.agg=var.agg, Rho=Rho, Tau=TAU, Beta=betas2, Alphas=alphas2) ll <- -(1/2)*N*(TT) * log(2*pi*SIGMA2) + (TT)*log(det(diag(N) - Rho*wmat)) - (1/2)*log(det(Hv)) - (1/(2*SIGMA2))*t(derr_full)%*%invHv%*%derr_full if(!is.finite(as.numeric(ll))){ll <- -1.0e+10} print('------------------') print('parameters step1') print(paste('TAU:',TAU)) print(paste('SIGMA2:',SIGMA2)) print(paste('SIGMA2XI:',SIGMA2XI)) print(paste('log-likelihood:',ll)) return(ll) } constrTAU <- 1-(abs(Rho)+0.05) constrTAU <- 1-(abs(Rho)+0.2) A <- matrix(c(-1,1,0,0,0,0,1,0,0,0,0,1),nrow=4) B <- matrix(c(constrTAU,-0.001,-0.0001,-0.0001)) mle <- maxLik::maxLik(LL, start=c(0.002,0.1,0.1), constraints=list(ineqA=A,ineqB=B), method='BFGS',hess=NULL) TAUhat <- mle$estimate[1] SIGMA2hat <- mle$estimate[2] SIGMA2XIhat <- mle$estimate[3] curenv = environment() LL2 <- function(pa){ TAU = pa[1] print('------------------') print('parameters step2') print(paste('TAU:',TAU)) PI <- getPI(wmat=wmat,Tau=TAU,Rho=Rho) V <- getV(PI=PI,m=m) Vtheta <- getVtheta(Sigma2=SIGMA2hat,Sigma2xi=SIGMA2XIhat,V=V) Hv <- getHv(Vtheta=Vtheta,N=N,TT=TT-1) ytilde <- 
getYtilde(dataset=dataset0, yvar=yvar0, xvar=xvar0, wmat=wmat, var.agg=var.agg, Rho=Rho, Tau=TAU) xtilde <- getXtildeM(dataset=dataset0, yvar=yvar0, xvarm = xvar0m, xvar=xvar0, wmat=wmat, var.agg=var.agg, Rho=Rho) invHv <- solve(Hv) coeff2 <- try(solve(t(xtilde)%*%invHv%*%xtilde)%*% t(xtilde)%*%invHv%*%ytilde) whichNO <- which(apply(xtilde,2,function(x) sum(x) ==0)) names(coeff2)[1:length(coeff2)] <- 'beta' names(coeff2)[1:(length(coeff2)-length(xvar))] <- 'alpha' alphas2 <- coeff2[which(names(coeff2) == 'alpha')] betas2 <- coeff2[which(names(coeff2) == 'beta')] derr_full <- uml_err_fullM(dataset=dataset0, yvar=yvar0, xvarm = xvar0m, xvar=xvar0, wmat=wmat, var.agg=var.agg, Rho=Rho, Tau=TAU, Beta=betas2, Alphas=alphas2) sigma2_glob <- (t(derr_full)%*%invHv%*%derr_full)/(N*TT) ll <- -(1/2)*N*(TT) * log(2*pi*sigma2_glob) + (TT)*log(det(diag(N) - Rho*wmat)) - (1/2)*log(det(Hv)) - (1/(2*sigma2_glob))*t(derr_full)%*%invHv%*%derr_full #assign('sigma2_glob',sigma2_glob,envir=curenv) if(!is.finite(as.numeric(ll))){ll=-1.0e+10} return(ll) } LL3 <- function(pa){ TAU = pa[1] print('------------------') print('parameters step2') print(paste('TAU:',TAU)) PI <- getPI(wmat=wmat,Tau=TAU,Rho=Rho) V <- getV(PI=PI,m=m) Vtheta <- getVtheta(Sigma2=SIGMA2hat,Sigma2xi=SIGMA2XIhat,V=V) Hv <- getHv(Vtheta=Vtheta,N=N,TT=TT-1) ytilde <- getYtilde(dataset=dataset0, yvar=yvar0, xvar=xvar0, wmat=wmat, var.agg=var.agg, Rho=Rho, Tau=TAU) xtilde <- getXtildeM(dataset=dataset0, yvar=yvar0, xvarm = xvar0m, xvar=xvar0, wmat=wmat, var.agg=var.agg, Rho=Rho) invHv <- solve(Hv) coeff2 <- try(solve(t(xtilde)%*%invHv%*%xtilde)%*% t(xtilde)%*%invHv%*%ytilde) whichNO <- which(apply(xtilde,2,function(x) sum(x) ==0)) names(coeff2)[1:length(coeff2)] <- 'beta' names(coeff2)[1:(length(coeff2)-length(xvar))] <- 'alpha' alphas2 <- coeff2[which(names(coeff2) == 'alpha')] betas2 <- coeff2[which(names(coeff2) == 'beta')] derr_full <- uml_err_fullM(dataset=dataset0, yvar=yvar0, xvarm = xvar0m, xvar=xvar0, wmat=wmat, var.agg=var.agg, Rho=Rho, Tau=TAU, Beta=betas2, Alphas=alphas2) sigma2_glob <- (t(derr_full)%*%invHv%*%derr_full)/(N*TT) return(sigma2_glob) } constrTAU <- 1-(abs(Rho)+0.05) A2 <- matrix(c(-1,1),nrow=2) B2 <- matrix(c(constrTAU,constrTAU)) mle2 <- maxLik::maxLik(LL2, start=c(TAUhat), constraints=list(ineqA=A2,ineqB=B2), method='BFGS',hess=NULL) TAUhat <- mle2$estimate[1] ytilde <- getYtilde(dataset=dataset0, yvar=yvar0, xvar=xvar0, wmat=wmat, var.agg=var.agg, Rho=Rho, Tau=TAUhat) xtilde <- getXtildeM(dataset=dataset0, yvar=yvar0, xvarm = xvar0m, xvar=xvar0, wmat=wmat, var.agg=var.agg, Rho=Rho) PI <- getPI(wmat=wmat,Tau=TAUhat,Rho=Rho) V <- getV(PI=PI,m=m) sigma2_glob <- LL3(TAUhat) Vtheta <- getVtheta(Sigma2=as.numeric(sigma2_glob),Sigma2xi=as.numeric(SIGMA2XIhat),V=V) Hv <- getHv(Vtheta=Vtheta,N=N,TT=TT-1) invHv <- solve(Hv) coeff2 <- try(solve(t(xtilde)%*%invHv%*%xtilde)%*% t(xtilde)%*%invHv%*%ytilde) whichNO <- which(apply(xtilde,2,function(x) sum(x) ==0)) names(coeff2)[1:length(coeff2)] <- 'beta' names(coeff2)[1:(length(coeff2)-length(xvar))] <- 'alpha' alphas2 <- coeff2[which(names(coeff2) == 'alpha')] betas2 <- coeff2[which(names(coeff2) == 'beta')] derr_full <- uml_err_fullM(dataset=dataset0, yvar=yvar0, xvarm = xvar0m, xvar=xvar0, wmat=wmat, var.agg=var.agg, Rho=Rho, Tau=TAUhat, Beta=betas2, Alphas=alphas2) rm(sigma2_glob) invHv <- solve(Hv) sigma2 <- (t(derr_full)%*%invHv%*%derr_full)/(N*TT) errors <- ytilde - xtilde%*%coeff2 coeff2varM <- as.numeric(t(errors)%*%errors) * 
1/(N*TT-length(coeff2))*solve(t(xtilde)%*%invHv%*%xtilde)
coeff2var <- sqrt(diag(coeff2varM))
names(coeff2var)[1:length(coeff2var)] <- 'beta'
names(coeff2var)[1:(length(coeff2var)-length(xvar))] <- 'alpha'
alphas2var <- coeff2var[which(names(coeff2var) == 'alpha')]
betas2sd <- coeff2var[which(names(coeff2var) == 'beta')]
names(betas2sd) <- xvar
COEF <- data.frame(variable=yvar, estimate=summary(mle2)[6]$estimate[1,1], std.err=summary(mle2)[6]$estimate[1,2])
BETAS <- data.frame(variable=xvar, estimate=betas2, std.err=betas2sd)
BETAS <- rbind(COEF,BETAS)
rownames(BETAS) <- 1:nrow(BETAS)
BETAS$t.value <- BETAS$estimate/BETAS$std.err
BETAS$p.value <- round(2*stats::pt(-abs(BETAS$t.value),df=N*TT-length(coeff2)),6)
BETAS$signif <- ''
BETAS$signif[BETAS$p.value<0.1] <- '.'
BETAS$signif[BETAS$p.value<0.05] <- '*'
BETAS$signif[BETAS$p.value<0.01] <- '**'
BETAS$signif[BETAS$p.value<0.001] <- '***'
out <- list(BETAS,mle,mle2,sigma2,coeff2)
names(out) <- c('coefs','mle','mle2','sigma2','coeffs')
return(out)
}

#' Simulate space-time stochastic process with fixed-effect
#'
#' This function simulates a space-time stochastic process according to the defined spatial structure and input parameters. It simulates data of a dynamic spatial lag model. It includes one exogenous variable and a fixed effect correlated with the exogenous variable.
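#'
#' @details A sketch of the data-generating process, as read from the code
#' below: y_t = (I - Rho*W)^(-1) (Tau*y_(t-1) + Beta*x_t + mu + eps_t),
#' which is the dynamic spatial lag model
#' y_t = Rho*W*y_t + Tau*y_(t-1) + Beta*x_t + mu + eps_t,
#' where mu is the unit fixed effect, x_t = z_t + 0.3*mu (so the fixed effect
#' is correlated with the regressor), and eps_t is Gaussian with standard
#' deviation sdDev.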
#'
#' @param dataset SpatialObject with the spatial units for which the data will be simulated
#' @param N How many spatial units will be used
#' @param TT Time dimension of the simulated process
#' @param spatial Radius that defines the scope of spatial dependence
#' @param Tau Autocorrelation parameter
#' @param Rho Spatial dependence parameter
#' @param Beta Coefficient associated with the exogenous variable
#' @param sdDev Standard deviation of the (Gaussian) error term
#' @param startingT The number of time periods after which the simulated data will be recorded
#' @param LONGLAT Boolean. TRUE if the projection is longlat
#'
#' @return A list with two objects. The first object is the STFDF with the simulated data. The second object is the spatial weight matrix
#' @examples
#' \donttest{
#' library(spacetime)
#' library(sp)
#' library(spdep)
#' set.seed(123)
#' sd = sim_data_fe(dataset=regsamp,N=100,TT=8,
#' spatial = 80,Tau = -0.2,Rho = 0.4,
#' Beta = 2,sdDev = 2,startingT = 10,
#' LONGLAT = TRUE)
#' stplot(sd[[1]][,,'Y'])
#' dev.new()
#' plot(sel_regioni)
#' points(coordinates(sd[[1]]@sp))
#' plot(mat2listw(sd[[2]]),coordinates(sd[[1]]@sp),add=TRUE,col=2)
#' }
#'
#' @export
sim_data_fe = function(dataset,N,TT,spatial=100, Tau=-0.14,Rho=0.67,Beta=1,sdDev=5, startingT = 11,LONGLAT=TRUE){
  N = N
  # take extra time periods so that the burn-in can be dropped later
  T0 = TT
  TT = TT+startingT
  # take the coordinates
  prova = sp::spTransform(dataset[1:N,], sp::CRS("+proj=longlat +datum=WGS84"))
  if(is(dataset,"SpatialPointsDataFrame")){
    Coordinates = sp::coordinates(prova)
    spat = sp::SpatialPoints(Coordinates)
    sp::proj4string(spat) = sp::proj4string(prova)
  }else{
    Coordinates = sp::coordinates(prova@sp)
    spat = prova@sp
  }
  if(LONGLAT){
    spatial_conv = spatial
  }else{
    conv_unit = 30/0.358
    spatial_conv = spatial/conv_unit
  }
  netw = spdep::dnearneigh(Coordinates,d1=0,d2=spatial_conv,longlat=LONGLAT)
  matw = spdep::nb2mat(netw,zero.policy=TRUE)
  Tau = Tau
  Rho = Rho
  Beta = Beta
  listX = list()
  listX1 = list()
  listU = list()
  # build the matrix (I - rho*W)
  w = kronecker(diag(TT),matw)
  w = Matrix::Matrix(w,sparse=TRUE)
  # build the matrix (I - rho*W)^(-1)
  W = as.matrix(solve(diag(N*TT) - Rho*w))
  Sigma = W
  listN = list()
  listFE = list()
  # create the first observation
  for(i in 1:N){
    listN[[i]] = rep(0,TT)
    listN[[i]][1] = stats::runif(1,-2,2)
    listU[[i]] = rep(0,TT)
    listU[[i]] = stats::rnorm(TT,mean=0,sd=sdDev)
    listFE[[i]] = stats::runif(1,-10,10)
    listX[[i]] = stats::arima.sim(list(ar=0.02),TT)
    listX1[[i]] = listX[[i]]+0.3*listFE[[i]]
  }
  # for each time period, take each unit and
  # create the data as follows:
  # y_11 = (I - rho*W)^(-1) %*% [y_10,y_20,y_30, ... ,y_n0] * Tau +
  # (I - rho*W)^(-1) %*% [x_11,x_21,x_31, ... ,x_n1] * Beta +
  # (I - rho*W)^(-1) %*% [e_11,e_21,e_31, ...
,e_n1] + # (I - rho*W)^(-1) %*% FE idxN = 1 idxT = 1 for(tt in 2:TT){ for(j in 1:N){ listN[[j]][tt] = Sigma[idxN,idxT:(idxT+N-1)] %*% as.matrix(unlist(lapply(listN, function(x) x[tt-1]))) * Tau + Sigma[idxN,idxT:(idxT+N-1)] %*% as.matrix(unlist(lapply(listX1, function(x) x[tt]))) * Beta + Sigma[idxN,idxT:(idxT+N-1)] %*% as.matrix(unlist(lapply(listU, function(x) x[tt]))) + Sigma[idxN,idxT:(idxT+N-1)] %*% as.matrix(unlist(listFE)) idxN = idxN + 1 } idxT = idxT + N } Y = unlist(listN) X = unlist(listX1) U = unlist(listU) I = rep(1:N,each=TT) A = rep(1:TT,N) Data = data.frame( Cod_Provincia = I, Anno = A, Y = Y, X1 = X) #Data = subset(Data,Anno>1) Data = Data[order(Data$Anno,Data$Cod_Provincia, decreasing=FALSE),] ttimes= seq(ISOdate(2000,1,1), ISOdate(2000+(TT-1),1,1), "years") prova = spacetime::STFDF(spat,ttimes,Data) prova = prova[,(TT-T0+1):TT] prova$Anno = prova$Anno - (startingT-1) out = list(prova,matw) return(out) }
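# A minimal end-to-end sketch (illustrative only, not run at load time):
# simulate a panel with sim_data_fe() and fit it with both bcml() estimation
# modes; the two estimates tables should broadly agree. All object names
# below are arbitrary.
if (FALSE) {
  set.seed(1)
  sim <- sim_data_fe(dataset = regsamp, N = 50, TT = 8, spatial = 80,
                     Tau = -0.2, Rho = 0.4, Beta = 2, sdDev = 2,
                     startingT = 10, LONGLAT = TRUE)
  fit_a <- bcml(dataset = sim[[1]], yearStart = 3, yearEnd = 9,
                var.agg = 'Cod_Provincia', eq = Y ~ X1,
                estimation = 'analytical', corrBIAS = TRUE, WMAT = sim[[2]])
  fit_n <- bcml(dataset = sim[[1]], yearStart = 3, yearEnd = 9,
                var.agg = 'Cod_Provincia', eq = Y ~ X1,
                estimation = 'numerical', corrBIAS = TRUE, WMAT = sim[[2]])
  fit_a$estimates
  fit_n$estimates
}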
/scratch/gouwar.j/cran-all/cranData/AGPRIS/R/apis_functions.R
#' Matrix of technological distance
#'
#' A matrix of agricultural technological distances between NUTS3 regions. It is used to weight the geographical distances between NUTS3 regions.
#'
#' @format A 106x106 matrix
#'
"distOte"
/scratch/gouwar.j/cran-all/cranData/AGPRIS/R/distOte.R
#' Cropland grid of Northern Italy (20km x 20km squares)
#'
#' SpatialPolygonsDataFrame object with croplands of Northern Italy approximated with 20km x 20km squared polygons. It is based on croplands data contained in the Corine Land Cover 2012 raster map.
#'
#' @format SpatialPolygonsDataFrame object
#' @source \url{https://land.copernicus.eu/pan-european/corine-land-cover/clc-2012}
"map1"
/scratch/gouwar.j/cran-all/cranData/AGPRIS/R/map1.R
#' Cropland grid of Northern Italy (40km x 40km squares)
#'
#' SpatialPolygonsDataFrame object with croplands of Northern Italy approximated with 40km x 40km squared polygons. It is based on croplands data contained in the Corine Land Cover 2012 raster map.
#'
#' @format SpatialPolygonsDataFrame object
#' @source \url{https://land.copernicus.eu/pan-european/corine-land-cover/clc-2012}
"map2"
/scratch/gouwar.j/cran-all/cranData/AGPRIS/R/map2.R
#' Cropland grid of Northern Italy (60km x 60km squares)
#'
#' SpatialPolygonsDataFrame object with croplands of Northern Italy approximated with 60km x 60km squared polygons. It is based on croplands data contained in the Corine Land Cover 2012 raster map.
#'
#' @format SpatialPolygonsDataFrame object
#' @source \url{https://land.copernicus.eu/pan-european/corine-land-cover/clc-2012}
"map3"
/scratch/gouwar.j/cran-all/cranData/AGPRIS/R/map3.R
#' Cropland grid of Northern Italy (100km x 100km squares)
#'
#' SpatialPolygonsDataFrame object with croplands of Northern Italy approximated with 100km x 100km squared polygons. It is based on croplands data contained in the Corine Land Cover 2012 raster map.
#'
#' @format SpatialPolygonsDataFrame object
#' @source \url{https://land.copernicus.eu/pan-european/corine-land-cover/clc-2012}
"map4"
/scratch/gouwar.j/cran-all/cranData/AGPRIS/R/map4.R
#' Cropland grid of Italy (100km x 100km squares) #' #' SpatialPolygonsDataFrame object with croplands of Italy approximated with 100km x 100km squared polygons. It is based on croplands data contained in the Corine Land Cover 2012 raster map. #' #' @format SpatialPolygonsDataFrame object #' @source \url{https://land.copernicus.eu/pan-european/corine-land-cover/clc-2012} "r100km"
/scratch/gouwar.j/cran-all/cranData/AGPRIS/R/r100km.R
#' Cropland grid of Italy (20km x 20km squares) #' #' SpatialPolygonsDataFrame object with croplands of Italy approximated with 20km x 20km squared polygons. It is based on croplands data contained in the Corine Land Cover 2012 raster map. #' #' @format SpatialPolygonsDataFrame object #' @source \url{https://land.copernicus.eu/pan-european/corine-land-cover/clc-2012} "r20km"
/scratch/gouwar.j/cran-all/cranData/AGPRIS/R/r20km.R
#' Cropland grid of Italy (40km x 40km squares) #' #' SpatialPolygonsDataFrame object with croplands of Italy approximated with 40km x 40km squared polygons. It is based on croplands data contained in the Corine Land Cover 2012 raster map. #' #' @format SpatialPolygonsDataFrame object #' @source \url{https://land.copernicus.eu/pan-european/corine-land-cover/clc-2012} "r40km"
/scratch/gouwar.j/cran-all/cranData/AGPRIS/R/r40km.R
#' Cropland grid of Italy (60km x 60km squares) #' #' SpatialPolygonsDataFrame object with croplands of Italy approximated with 60km x 60km squared polygons. It is based on croplands data contained in the Corine Land Cover 2012 raster map. #' #' @format SpatialPolygonsDataFrame object #' @source \url{https://land.copernicus.eu/pan-european/corine-land-cover/clc-2012} "r60km"
/scratch/gouwar.j/cran-all/cranData/AGPRIS/R/r60km.R
#' Coordinates of simulated farms in Northern Italy #' #' SpatialPointsDataFrame object with 1000 SpatialPoints to represent simulated farms in the simulation exercise #' #' @format SpatialPointsDataFrame object #' "regsamp"
/scratch/gouwar.j/cran-all/cranData/AGPRIS/R/regsamp.R
#' Merged NUTS3 for simulation exercise #' #' SpatialPolygons object with merged NUTS3 regions of Northern Italy used in the simulation exercise #' #' @format SpatialPolygons object #' @source \url{https://www.istat.it/it/archivio/222527} "sel_regioni"
/scratch/gouwar.j/cran-all/cranData/AGPRIS/R/sel_regioni.R
#' Neighbors of NUTS3 of Sardinia
#'
#' A table containing the links of NUTS3 regions in Sardinia to NUTS3 regions of mainland Italy
#'
#' @format A data frame with the links
#'
"tabSard"
/scratch/gouwar.j/cran-all/cranData/AGPRIS/R/tabSard.R
# This is one of the main functions. This function computes the proposed AHM model. # It needs to tell clearly what are the major components # under each major component, how many minor components are there # assuming less than 10 minor components under each major #' This is one of the main functions. The function ahm computes the proposed additive heredity model. #' @param x data.frame Note the column names of the x should be in the order of major components, minor components, and no interactions are needed. #' @param y numeric vector #' @param num_major number of major components #' @param dist_minor the allocation of number of minor components nested under major components #' @param type heredity type, weak heredity is the current support type #' @param lambda_seq a numeric vector for the options of lambda used in ridge regression for estimating the initials #' @param mapping_type the form of the coefficient function of major components in front of corresponding minor terms. Currently only support "power" #' @param powerh the power parameter used for the power function #' @param rep_gcv the number of choices of tuning parameter used in the GCV selection #' @param nfolds used in cv.glmnet for initial value of parameters in the non-negative garrote method #' @param alpha 0 is for the ridge in glmnet https://web.stanford.edu/~hastie/glmnet/glmnet_alpha.html #' @import glmnet quadprog #' @return Return a list #' @export #' @examples #' data("pringles_fat") #' data_fat = pringles_fat #' h_tmp = 1.3 #' x = data_fat[,c("c1","c2","c3","x11","x12","x21","x22")] #' y = data_fat[,1] #' out = ahm (y, x, num_major = 3, dist_minor = c(2,2,1), #' type = "weak", alpha=0, lambda_seq=seq(0,5,0.01), nfold = NULL, #' mapping_type = c("power"), powerh = h_tmp, #' rep_gcv=100) #' summary(out) ahm = function(y, x, num_major = 3, dist_minor = c(2,2,1), type = "weak", alpha=0, lambda_seq=seq(0,5,0.01), nfolds=NULL, mapping_type = c("power"), powerh = 0, rep_gcv=100){ call <- match.call() # store original y, and x with interactions augmented x_orig = x x_aug_orig = augment_df (x, num_major, dist_minor) y_orig = y x = mapping_function (x, num_major, dist_minor, mapping_type, powerh) # add mapping function in front of the minor components x_aug0 = augment_df (x, num_major, dist_minor) # scale x ## uncentered data in scale: x_aug x_aug=scale(as.matrix(x_aug0),center=FALSE,scale = apply(as.matrix(x_aug0), 2, sd, na.rm = TRUE)) y=scale(as.matrix(y),center=FALSE,scale = apply(as.matrix(y), 2, sd, na.rm = TRUE)) # update 05182018 # zero variance columns return NaN # https://stackoverflow.com/questions/15363610/why-does-scale-return-nan-for-zero-variance-columns x_aug[is.nan(as.matrix(x_aug))] <- 0 x_aug = data.frame(x_aug) colname = colnames(x_aug) num_obs = nrow(x_aug) if (is.null(nfolds)) nfolds = num_obs # LOOCV # use RIDGE for the initial values # LOOCV in glmnet cv.RIDGE1<-suppressWarnings( cv.glmnet(as.matrix(x_aug),y=y,alpha=alpha,family='gaussian',intercept=FALSE,nfolds=nfolds,lambda = lambda_seq) ) # plot(cv.RIDGE1,xvar="lambda") coef.RIDGE1=coef(cv.RIDGE1, s = "lambda.min") # best_lambda.RIDGE1 <- cv.RIDGE1$lambda.min # Convert coef from sparse matrix to normal one coef.RIDGE1=Matrix(coef.RIDGE1, sparse = FALSE) # # construct the quad optim # details can be found in the package vignnet B=diag((as.vector(coef.RIDGE1))[-1]) Z=as.matrix(x_aug)%*%B D=t(Z)%*%Z # matrix D in qp is required to be pd # nearPD computes the nearest positive definite matrix. 
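# (Added note: what follows is Breiman's non-negative garrote recast as a
# quadratic program. Given the initial ridge estimates b, set Z = X %*% diag(b)
# and solve
#   min_c ||y - Z c||^2  subject to  c >= 0  and  sum(c) <= M,
# which solve.QP() receives as D = t(Z) %*% Z and d = t(Z) %*% y, together
# with the heredity constraints built by generate_con_weak(); the budget M
# is then chosen by GCV in the loop below.)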
D=Matrix::nearPD(D)$mat d=t(Z)%*%y num_coef = nrow(B) A=cbind(-1,diag(num_coef)) if (type == "weak"){ con = generate_con_weak (x_aug, B, num_major, dist_minor) } else { # type == "strong" # con = generate_con_strong (x_aug, B, num_major, dist_minor) } A=cbind(A,t(con)) b0_min=0 M_min_set=b0_min+0.01 # M_min_set is chosen large enough in case of constraints inconsistent M=seq(M_min_set,ncol(B),length=rep_gcv) gcv=numeric(rep_gcv) beta_hat=matrix(rep(0,num_coef*rep_gcv),nrow=num_coef, ncol=rep_gcv) # 15 is because there is 15 parameters in b for(i in 1:rep_gcv) { b0=c(-M[i],rep(0,ncol(A)-1)) # b0=c(-M[i],rep(b0_min,9),rep(0,6),rep(0,ncol(A)-15-1)) coef.nng=solve.QP(D,d,A,b0)$sol beta.nng=B%*%coef.nng beta_hat[,i]=beta.nng e=y-Z%*%coef.nng # gcv[i]=sum(e^2)/(length(y)*(1-t(diag(w))%*%coef.nng/length(y))^2) gcv[i]=sum(e^2)/(length(y)*(1-M[i]/length(y))^2) } M_min=M[which.min(gcv)] b0=c(-M_min,rep(0,ncol(A)-1)) coef.nng=round(solve.QP(D,d,A,b0)$sol,10) # 10 is round digits names(coef.nng)=colname # coef.nng beta.nng=B%*%coef.nng row.names(beta.nng)=colname coef_table_scaled=round(cbind(coef.RIDGE1[-1,1],beta.nng,as.matrix(coef.nng)),7) # 7 is round digits colnames(coef_table_scaled)= c('Ridge_initial','Theta_nng','Scalor_b') e=y-Z%*%coef.nng # metrics # beta.nng.orig=colSdApply(as.matrix(y_orig, ncol=1))*diag(1/as.vector(colSdApply(x_aug_orig)))%*%beta.nng beta.nng.orig=colSdApply(as.matrix(y_orig, ncol=1))*diag(1/as.vector(colSdApply(x_aug0)))%*%beta.nng # update Dec 08, 2018 # update 05182018 # due to the D-optimal design points selected, some coefficients are zero # https://stackoverflow.com/questions/15363610/why-does-scale-return-nan-for-zero-variance-columns beta.nng.orig[is.nan(as.matrix(beta.nng.orig))] <- 0 # e.orig=as.matrix(y_orig,ncol=1)-as.matrix(x_aug_orig,nrow=num_obs)%*%beta.nng.orig e.orig=as.matrix(y_orig,ncol=1)-as.matrix(x_aug0,nrow=num_obs)%*%beta.nng.orig # update Dec 08, 2018 mse=drop(t(e.orig)%*%e.orig/(num_obs-sum(beta.nng.orig!=0))) # sst=drop(t(as.matrix(y_orig))%*%as.matrix(y_orig) - (sum(as.matrix(y_orig)))^2/num_obs) sst=drop(t(as.matrix(y_orig))%*%as.matrix(y_orig)) # https://stats.stackexchange.com/questions/26176/removal-of-statistically-significant-intercept-term-increases-r2-in-linear-mo R2=drop(1-t(e.orig)%*%e.orig/sst) # AIC rss=drop(t(e.orig)%*%e.orig) effective_num_pred = sum(beta.nng.orig!=0) + 1 # one is for the parameter sigma aicc = compute_aicc (rss=rss, n=num_obs, p=effective_num_pred, type = "AICc") # rownames(beta.nng.orig) = colname # output # At = t(A) quad_prog = enlist(D, d, A, b0, M_min) x_aug_including_mapping_coef = x_aug0 out = c(list(beta_nng=beta.nng.orig, residual= e.orig) , enlist( mse, R2, aicc, coef_table_scaled, quad_prog, x_aug_including_mapping_coef, x_aug_orig, y_orig, powerh, num_major, dist_minor, mapping_type, call)) # note, beta_nng is corresponding to the data with mapping function*minor components #out = NULL class(out) = "ahm" return(out) } #' Summary method for the fitted ahm object #' #' @param object fitted ahm object #' @param ... not used #' @rdname summary.ahm #' @method summary ahm #' @export #' @examples #' data("pringles_fat") #' data_fat = pringles_fat #' h_tmp = 1.3 #' x = data_fat[,c("c1","c2","c3","x11","x12","x21","x22")] #' y = data_fat[,1] #' out = ahm (y, x, num_major = 3, dist_minor = c(2,2,1), #' type = "weak", alpha=0, lambda_seq=seq(0,5,0.01), nfold = NULL, #' mapping_type = c("power"), powerh = h_tmp, #' rep_gcv=100) #' summary(out) #' #' summary.ahm = function(object,...) 
#' Summary method for the fitted ahm object
#'
#' @param object fitted ahm object
#' @param ... not used
#' @rdname summary.ahm
#' @method summary ahm
#' @export
#' @examples
#' data("pringles_fat")
#' data_fat = pringles_fat
#' h_tmp = 1.3
#' x = data_fat[,c("c1","c2","c3","x11","x12","x21","x22")]
#' y = data_fat[,1]
#' out = ahm (y, x, num_major = 3, dist_minor = c(2,2,1),
#'            type = "weak", alpha=0, lambda_seq=seq(0,5,0.01), nfolds = NULL,
#'            mapping_type = c("power"), powerh = h_tmp,
#'            rep_gcv=100)
#' summary(out)
summary.ahm = function(object, ...)
{
  print(object$call)
  cat("\n")
  cat("The MSE of the model is ")
  print(object$mse)
  cat("\n")
  cat("The AICc of the model is ")
  print(object$aicc)
  cat("\n")
  cat("The R2 of the model is ")
  print(object$R2)
  cat("\n")
  cat("The estimated coefficients are: \n")
  print(t(object$beta_nng))
  cat("\n")
  cat("If power functions were used as the coefficients, the power parameter h used in the model is ")
  print(object$powerh)
}

#' Coefficient method for the fitted ahm object
#'
#' @param object ahm object
#' @param ... not used
#' @return the estimated coefficients
#' @export
#' @examples
#' data("pringles_fat")
#' data_fat = pringles_fat
#' h_tmp = 1.3
#' x = data_fat[,c("c1","c2","c3","x11","x12","x21","x22")]
#' y = data_fat[,1]
#' out = ahm (y, x, num_major = 3, dist_minor = c(2,2,1),
#'            type = "weak", alpha=0, lambda_seq=seq(0,5,0.01), nfolds = NULL,
#'            mapping_type = c("power"), powerh = h_tmp,
#'            rep_gcv=100)
#' coef(out)
coef.ahm = function(object, ...){
  object$beta_nng
}

#' Predict method for the fitted ahm object
#'
#' @param object ahm object
#' @param newx Matrix of new values for x at which predictions are to be made.
#' @param ... not used
#' @return predicted value(s) at newx
#' @export
#' @examples
#' data("pringles_fat")
#' data_fat = pringles_fat
#' h_tmp = 1.3
#' x = data_fat[,c("c1","c2","c3","x11","x12","x21","x22")]
#' y = data_fat[,1]
#' out = ahm (y, x, num_major = 3, dist_minor = c(2,2,1),
#'            type = "weak", alpha=0, lambda_seq=seq(0,5,0.01), nfolds = NULL,
#'            mapping_type = c("power"), powerh = h_tmp,
#'            rep_gcv=100)
#' predict(out)
predict.ahm = function(object, newx, ...){
  if (0) {
    # object = out
    # newx = x[2,]
    # predict(out)
    # predict(out, newx = x[2,])
  }
  if (missing(newx)) {
    newx_aug = object$x_aug_including_mapping_coef
  } else {
    if (is.numeric(newx)) {
      newx = as.data.frame(newx)
    }
    num_col = ncol(newx)
    if (num_col != (object$num_major + sum(ifelse(object$dist_minor == 1, 0, object$dist_minor)))) {
      stop("The provided newx does not satisfy the format of the predictor matrix.\n",
           "The format is: major components followed by the minor components nested under each major component.\n",
           "For example: c1, c2, c3, x11, x12, x21, x22, x23, x31, x32.")
    } else {
      powerh = object$powerh
      num_major = object$num_major
      dist_minor = object$dist_minor
      mapping_type = object$mapping_type
      # apply the mapping functions in front of the minor components
      newx = mapping_function(newx, num_major, dist_minor, mapping_type, powerh)
      # construct the interaction terms
      newx_aug = augment_df(newx, num_major, dist_minor)
    }
  }
  out = newx_aug %*% coef(object)
  return(out)
}
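# Hedged usage sketch for predict.ahm() with new design points; it assumes
# `out` is the fit from the roxygen example above (pringles_fat, weak heredity).
if (0) {
  data("pringles_fat")
  x_new <- pringles_fat[, c("c1","c2","c3","x11","x12","x21","x22")]
  predict(out)                       # fitted values at the training design points
  predict(out, newx = x_new[1:3, ])  # predictions at three design rows
}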
#' mapping_function adds the functional coefficients of the major components in front of the minor-component terms
#'
#' @param x data.frame Note that the column names of x should be in the order of major components followed by minor components; no interaction columns are needed.
#' @param num_major number of major components
#' @param dist_minor the allocation of the number of minor components nested under the major components
#' @param mapping_type the form of the coefficient function of the major components in front of the corresponding minor terms. Currently only "power" is supported
#' @param powerh the power parameter used for the power function
#' @return data frame
#' @export
#' @examples
#' data("pringles_fat")
#' data_fat = pringles_fat
#' h_tmp = 1.3
#' x = data_fat[,c("c1","c2","c3","x11","x12","x21","x22")]
#' mapping_function(x=x, num_major=3, dist_minor=c(2,2,1), mapping_type = c("power"), powerh=0)
mapping_function = function(x, num_major = 3, dist_minor = c(2,2,1), mapping_type = c("power"), powerh = 0){
  # h is the tuning parameter in the transformation c^{h}, with 0 < h <= 2
  if (0) {
    powerh = 0.5
  }
  # type = match.arg(type)
  colname = colnames(x)
  index_major = seq(from = 1, to = num_major, by = 1)
  index_minor = seq(from = (index_major[length(index_major)] + 1),
                    to = (num_major + sum(ifelse(dist_minor == 1, 0, dist_minor))), by = 1)
  if (mapping_type == "power") {
    # for each minor component, multiply by the mapping power function c^h
    for (k in index_minor) {
      corresponding_minor_element = colname[k]
      corresponding_minor_element = unlist(strsplit(corresponding_minor_element, "[a-z]+"))[2]
      corresponding_minor_element = floor(as.numeric(as.character(corresponding_minor_element))/10) # assuming fewer than 10 minor components under each major component
      corresponding_major_element = paste("c", corresponding_minor_element, sep = '')
      x[, colname[k]] = x[, corresponding_major_element]^powerh * x[, colname[k]]
    }
  }
  return(x)
}

# column standard deviations
colSdApply <- function(x, ...) apply(X = x, MARGIN = 2, FUN = sd, ...)
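# Hedged illustration of one weak-heredity constraint row of the kind
# generate_con_weak() below builds (numbers hypothetical): for an interaction
# column such as c1.c2 the row holds -1 on the interaction and +1 on each
# parent, so the quadratic program enforces b_c1 + b_c2 - b_c1.c2 >= 0 on the
# garrote scalars b.
if (0) {
  row_demo <- c(c1 = 1, c2 = 1, c3 = 0, c1.c2 = -1)
  b_demo   <- c(0.4, 0.3, 0.0, 0.8)
  sum(row_demo * b_demo) >= 0 # FALSE: this b would violate weak heredity
}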
# generate the matrix corresponding to the constraints in the quadratic program
generate_con_weak = function(x_aug, B, num_major, dist_minor){
  # the pattern of the column names in x_aug for
  # num_major = 3, dist_minor = c(2,2,1) is:
  # c1 c2 c3 x11 x12 x21 x22 c1c2 c1c3 c2c3 x11x12 x21x22
  index_major = seq(from = 1, to = num_major, by = 1)
  index_minor = seq(from = (index_major[length(index_major)] + 1),
                    to = (num_major + sum(ifelse(dist_minor == 1, 0, dist_minor))), by = 1)
  index_majorbymajor = seq(from = (index_minor[length(index_minor)] + 1),
                           to = (index_minor[length(index_minor)] + choose(num_major, 2)), by = 1)
  counter_majorbymajor = 0
  for (k in 1:length(dist_minor)) {
    if (dist_minor[k] != 1) {
      counter_majorbymajor = counter_majorbymajor + choose(dist_minor[k], 2)
    }
  }
  num_major = length(index_major)
  num_minor = length(index_minor)
  num_majorbymajor = length(index_majorbymajor)
  num_minorbyminor = counter_majorbymajor
  index_minorbyminor = seq(from = (index_majorbymajor[length(index_majorbymajor)] + 1),
                           to = (index_majorbymajor[length(index_majorbymajor)] + num_minorbyminor))
  # a major component whose dist_minor element = 1 has no minor components
  # nested under it and contributes no constraint rows
  num_row_con = num_majorbymajor + num_minorbyminor + sum(ifelse(dist_minor == 1, 0, dist_minor))
  num_col_con = max(index_minorbyminor) # nrow(B) # update Dec 23, 2018
  con_init = matrix(0, nrow = num_row_con, ncol = num_col_con)
  colnames(con_init) = colnames(x_aug)
  # construct the constraints row by row, starting from the first row
  id_row = 1
  # constraints related to heredity, focusing on the interaction part of con
  ## major-by-major interaction components
  for (kk in index_majorbymajor) {
    con_init[id_row, kk] = -1
    corresponding_element = unlist(strsplit(colnames(con_init)[kk], "[.]"))
    con_init[id_row, corresponding_element] = 1
    id_row = id_row + 1
  }
  ## minor-by-minor interaction components
  for (ll in index_minorbyminor) {
    con_init[id_row, ll] = -1
    corresponding_element = unlist(strsplit(colnames(con_init)[ll], "[.]|[:]"))
    con_init[id_row, corresponding_element] = 1
    id_row = id_row + 1
  }
  # constraints related to the major-minor hierarchy
  for (gg in index_minor) {
    con_init[id_row, gg] = -1
    # https://stat.ethz.ch/pipermail/r-help/2014-July/420407.html
    corresponding_element = unlist(strsplit(colnames(con_init)[gg], "[a-z]+"))[2]
    corresponding_element = floor(as.numeric(as.character(corresponding_element))/10) # assuming fewer than 10 minor components under each major component
    corresponding_element = paste("c", corresponding_element, sep = '')
    con_init[id_row, corresponding_element] = 1
    id_row = id_row + 1
  }
  return(con_init)
}

# augment the data frame by adding two-factor interactions, based on the AHM model
augment_df = function(x, num_major = 3, dist_minor = c(2,2,1)){
  if (0) {
    # rm(list=ls())
    # library(devtools)
    # load_all()
    # data("pringles_fat") # to please R CMD check
    x = pringles_fat[, c("c1","c2","c3","x11","x12","x21","x22")]
    y = pringles_fat[, 1]
    # the AHM function for the general case
    num_major = 3
    dist_minor = c(2,2,1)
    augment_df(x, num_major = 3, dist_minor = c(2,2,1))

    # another data set
    data_fat_I = NULL # to please R CMD check
    # data("data_fat_I") # to please R CMD check
    x = data_fat_I[, c("c1","c2","c3","x11","x12","x21","x22","x31","x32")]
    y = data_fat_I[, 1]
    num_major = 3
    dist_minor = c(2,2,2)
    augment_df(x, num_major = 3, dist_minor = c(2,2,2))

    # another artificial data set
    # data("data_fat_I") # to please R CMD check
    x = data_fat_I[, c("c1","c2","c3","x11","x12","x21","x22","x31","x32","x11x12")]
    x$x11x12 = x$x11x12/2 + 0.01
    colnames(x)[10] = "x33"
    y = data_fat_I[, 1]
    num_major = 3
    dist_minor = c(2,2,3)
    augment_df(x, num_major = 3, dist_minor = c(2,2,3))
  }

  dat = x
  names_major = rep(0, num_major)
  names_minor = c() # a major component with a single "minor" contributes no minor names
  # loop over the minor components under each major component
  for (i in 1:length(dist_minor)) {
    # use i as the id for the major components
    names_major[i] <- paste("c", i, sep = "") # names for the major components
    num_minor = dist_minor[i] # number of minors nested under that major component
    if (num_minor == 1) {
      # do nothing: no minor columns are generated when num_minor = 1
    } else {
      ids_minor = seq(from = 1, to = num_minor, by = 1) # ids for the minor components
      names_minor_tmp = paste("x", i, ids_minor, sep = "")
      names_minor <- c(names_minor, names_minor_tmp) # names for the minor components
    }
  }
  names = c(names_major, names_minor)
  assign_colnames = TRUE
  if (assign_colnames) colnames(dat) = names # assign the column names

  out_dat = dat
  # expand the data set
  ## major components
  major_names = grep("c", names, value = TRUE)
  out_dat = cbind(out_dat, expand_interactions(dat, major_names))
  ## minor components
  for (j in 1:length(dist_minor)) {
    num_minor = dist_minor[j]
    if (num_minor != 1) {
      minor_names = grep(paste("x", j, sep = ""), names, value = TRUE)
      expanded_dat = expand_interactions(dat, minor_names)
      out_dat = cbind(out_dat, expanded_dat)
    } else {
      # do nothing: no minor components are nested here, so there are no
      # minor interactions to expand
    }
  }
  out = t(unique(t(out_dat))) # remove duplicated columns resulting from the two-factor-interaction expansion
  return(out)
}
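# Hedged sketch of augment_df() output for the pringles_fat layout; the
# expected column pattern is taken from the comment in generate_con_weak()
# above (majors, minors, major-by-major, then minor-by-minor interactions).
if (0) {
  data("pringles_fat")
  x_demo <- pringles_fat[, c("c1","c2","c3","x11","x12","x21","x22")]
  colnames(augment_df(x_demo, num_major = 3, dist_minor = c(2,2,1)))
  # roughly: c1 c2 c3 x11 x12 x21 x22, then c1*c2, c1*c3, c2*c3,
  # then x11*x12 and x21*x22
}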
# ==== end of source file: AHM/R/ahm.R ====
#' Photoresist-coating experiment data
#'
#' @docType data
#'
#' @usage data(coating)
#'
#' @format data.frame
#'
#' @keywords datasets
#'
#' @references Cornell, J.A. and Ramsey, P.J. (1998). A generalized mixture model for
#' categorized-components problems with an application to a
#' photoresist-coating experiment. \emph{Technometrics}, 40(1), 48-61.
#' (\href{https://www.tandfonline.com/doi/abs/10.1080/00401706.1998.10485481}{tandfonline})
#'
#' @examples
#' data(coating)
#' \donttest{print(coating)}
"coating"
data(coating, envir = environment())
# ==== end of source file: AHM/R/coating-data.R ====
#' Compute AICc
#'
#' @param rss residual sum of squares
#' @param n number of observations
#' @param p number of nonzero parameters
#' @param type character, "AICc"
#' @references \href{https://stats.stackexchange.com/questions/87345/calculating-aic-by-hand-in-r/}{Calculating AIC "by hand" in R on Stack Overflow}
#' @export
#' @examples
#' compute_aicc (rss=10, n=30, p=6, type = "AICc")
compute_aicc = function(rss, n, p, type = "AICc"){
  # rss is the residual sum of squares
  # n is the number of observations
  # p is the number of nonzero parameters: coef + 1 (1 for sigma)
  if (type == "AICc") {
    scalar = 2
  }
  out = n*log(rss/n) + scalar*p*n/(n-p-1)
  return(out)
}

#' Expand the interaction terms for each subset group, say x11, x12, or c1, c2, c3
#'
#' @param dat data frame
#' @param sel_names characters
#' @export
#' @examples
#' data("pringles_fat")
#' data_fat = pringles_fat
#' x = data_fat[,c("c1","c2","c3","x11","x12","x21","x22")]
#' expand_interactions (dat=x, sel_names=c("c1", "c2", "c3"))
expand_interactions = function(dat, sel_names){
  # only the columns named in sel_names are expanded;
  # the response column is never part of the expansion
  dat = dat[, sel_names]
  names = colnames(dat) # equal to sel_names
  term = paste("~(", paste(names, collapse = "+"), ")^2-1")
  term = as.formula(term)
  term = model.matrix(term, dat)
  # dat_aug = model.matrix(as.formula(term), dat)
  # out = data.frame(dat_aug, dat[,ncol(dat)])
  # colnames(out)[ncol(out)] = colnames(dat)[ncol(dat)]
  out = term # returned in matrix format
  return(out)
}

# this utility function is adapted from the R package bestsubset on GitHub:
# enlist <- function (...)
# {
#   result <- list(...)
#   if ((nargs() == 1) & is.character(n <- result[[1]])) {
#     result <- as.list(seq(n))
#     names(result) <- n
#     for (i in n) result[[i]] <- get(i)
#   }
#   else {
#     n <- sys.call()
#     n <- as.character(n)[-1]
#     if (!is.null(n2 <- names(result))) {
#       which <- n2 != ""
#       n[which] <- n2[which]
#     }
#     names(result) <- n
#   }
#   result
# }
# Because of copyright concerns on CRAN, the simplified version below is used
# instead of the bestsubset original above.
#' Create a list
#'
#' @param ... objects to be included as elements in the list
#' @export
#' @examples
#' item = c(1:10)
#' enlist(item)
enlist = function (...) {
  result = list(...)
  the_call = sys.call()
  names = as.character(the_call)[-1]
  names(result) = names
  return(result)
}
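# Hedged demo of enlist(): the returned list is named after the expressions
# passed in, mirroring the behavior of the original commented above.
if (0) {
  a_demo <- 1
  b_demo <- "x"
  names(enlist(a_demo, b_demo)) # c("a_demo", "b_demo")
}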
#' Compute the condition number of a design matrix
#'
#' @param x matrix to be used in the svd
#' @export
#' @examples
#' data("pringles_fat")
#' data_fat = pringles_fat
#' x = data_fat[,c("c1","c2","c3","x11","x12","x21","x22")]
#' find_condition_num (x)
find_condition_num = function (x) {
  singular_values = base::svd(x)$d
  # the sensitivity of the solution x to changes in the right-hand side b is a
  # reflection of the condition number
  # https://blogs.mathworks.com/cleve/2017/07/17/what-is-the-condition-number-of-a-matrix/
  # note: the squared ratio of the extreme singular values of x equals the
  # condition number of t(x) %*% x
  cond_num = (max(singular_values)/min(singular_values))^2
  return(cond_num)
}

# check column correlations (used with the multiple Scheffe model)
# https://stackoverflow.com/questions/22282531/how-to-compute-correlations-between-all-columns-in-r-and-detect-highly-correlate
#' Check column correlations
#'
#' @param dat data.frame
#' @export
#' @examples
#' data("pringles_fat")
#' data_fat = pringles_fat
#' x = data_fat[,c("c1","c2","c3","x11","x12","x21","x22")]
#' check_col_correlation (dat=x)
check_col_correlation = function(dat){
  # library(dplyr); library(tidyr); library(tibble)
  value = var1 = var2 = NULL ## to please R CMD check
  dat_sub = dat
  d2 <- dat_sub %>%
    as.matrix %>%
    cor %>%
    as.data.frame %>%
    tibble::rownames_to_column(var = 'var1') %>%
    tidyr::gather(var2, value, -var1)
  d2
}
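# Hedged numeric check for the two collinearity diagnostics above: an
# orthogonal design has equal singular values, so find_condition_num() returns
# 1; near-collinear columns inflate it, and check_col_correlation() lists the
# pairwise correlations in long format.
if (0) {
  find_condition_num(diag(3))                 # exactly 1
  find_condition_num(cbind(1:4, 1:4 + 1e-6))  # very large
  check_col_correlation(data.frame(a = 1:4, b = c(2, 1, 4, 3)))
}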
# ==== end of source file: AHM/R/common.R ====