corrections in the CV functions and new CV results
thengl committed Feb 8, 2018
1 parent bd2f822 commit beb9a5b
Showing 15 changed files with 125 additions and 53 deletions.
48 changes: 35 additions & 13 deletions grids/cv/cv_functions.R
@@ -98,8 +98,13 @@ predict_parallel <- function(j, sel, varn, formulaString, rmatrix, idcol){
## --------------------------------------------------------------

## predict soil properties in parallel:
predict_parallelP <- function(j, sel, varn, formulaString, rmatrix, idcol, method, cpus, Nsub=1e4){
predict_parallelP <- function(j, sel, varn, formulaString, rmatrix, idcol, method, cpus, Nsub=1e4, remove_duplicates=FALSE){
s.train <- rmatrix[!sel==j,]
if(remove_duplicates==TRUE){
## TH: optional - check how the model performs without knowledge of the 3D dimension
sel.dup = !duplicated(s.train[,idcol])
s.train <- s.train[sel.dup,]
}
s.test <- rmatrix[sel==j,]
n.l <- dim(s.test)[1]
if(missing(Nsub)){ Nsub = length(all.vars(formulaString))*50 }
@@ -148,16 +153,28 @@ predict_parallelP <- function(j, sel, varn, formulaString, rmatrix, idcol, metho
return(obs.pred)
}

cv_numeric <- function(formulaString, rmatrix, nfold, idcol, cpus, method="ranger", Log=FALSE){ varn = all.vars(formulaString)[1]
cv_numeric <- function(formulaString, rmatrix, nfold, idcol, cpus, method="ranger", Log=FALSE, LLO=TRUE){
varn = all.vars(formulaString)[1]
message(paste("Running ", nfold, "-fold cross validation with model re-fitting method ", method," ...", sep=""))
if(nfold > nrow(rmatrix)){
stop("'nfold' argument must not exceed total number of points")
}
if(sum(duplicated(rmatrix[,idcol]))>0.5*nrow(rmatrix)){
sel <- dismo::kfold(rmatrix, k=nfold, by=rmatrix[,idcol])
message(paste("Subsetting observations by '", idcol, "'"))
if(LLO==TRUE){
## TH: Leave whole locations out
ul <- unique(rmatrix[,idcol])
sel.ul <- dismo::kfold(ul, k=nfold)
sel <- lapply(1:nfold, function(o){ data.frame(row.names=which(rmatrix[,idcol] %in% ul[sel.ul==o]), x=rep(o, length(which(rmatrix[,idcol] %in% ul[sel.ul==o])))) })
sel <- do.call(rbind, sel)
sel <- sel[order(as.numeric(row.names(sel))),]
message(paste0("Subsetting observations by unique location"))
} else {
sel <- dismo::kfold(rmatrix, k=nfold, by=rmatrix[,idcol])
message(paste0("Subsetting observations by '", idcol, "'"))
}
} else {
sel <- dismo::kfold(rmatrix, k=nfold)
}
if(nfold > nrow(rmatrix)){
stop("'nfold' argument must not exceed total number of points")
message(paste0("Simple subsetting of observations using kfolds"))
}
if(missing(cpus)){
if(method=="randomForest"){
@@ -179,12 +196,17 @@ cv_numeric <- function(formulaString, rmatrix, nfold, idcol, cpus, method="range
}
}
if(method=="ranger"){
snowfall::sfInit(parallel=TRUE, cpus=ifelse(nfold>cpus, cpus, nfold))
snowfall::sfExport("predict_parallelP","idcol","formulaString","rmatrix","sel","varn","method")
snowfall::sfLibrary(package="plyr", character.only=TRUE)
snowfall::sfLibrary(package="ranger", character.only=TRUE)
out <- snowfall::sfLapply(1:nfold, function(j){predict_parallelP(j, sel=sel, varn=varn, formulaString=formulaString, rmatrix=rmatrix, idcol=idcol, method=method)})
snowfall::sfStop()
if(cpus==1){
out <- lapply(1:nfold, function(j){predict_parallelP(j, sel=sel, varn=varn, formulaString=formulaString, rmatrix=rmatrix, idcol=idcol, method=method)})
} else {
require("snowfall")
snowfall::sfInit(parallel=TRUE, cpus=ifelse(nfold>cpus, cpus, nfold))
snowfall::sfExport("predict_parallelP","idcol","formulaString","rmatrix","sel","varn","method")
snowfall::sfLibrary(package="plyr", character.only=TRUE)
snowfall::sfLibrary(package="ranger", character.only=TRUE)
out <- snowfall::sfLapply(1:nfold, function(j){predict_parallelP(j, sel=sel, varn=varn, formulaString=formulaString, rmatrix=rmatrix, idcol=idcol, method=method)})
snowfall::sfStop()
}
}
## calculate mean accuracy:
out <- plyr::rbind.fill(out)
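Aside (not part of the committed diff): the leave-location-out fold assignment added to cv_numeric above can be sketched in a few lines of base R without the dismo dependency. The toy data frame d and its SOURCEID column below are illustrative assumptions, not objects from this repository.

set.seed(1)
d <- data.frame(SOURCEID = rep(paste0("P", 1:20), each = 3), DEPTH = rep(c(5, 15, 30), 20))
nfold <- 5
ul <- unique(d$SOURCEID)
## assign every unique location to one fold, then map the folds back to rows:
sel.ul <- sample(rep(1:nfold, length.out = length(ul)))
sel <- sel.ul[match(d$SOURCEID, ul)]
## sanity check: no location is split across folds (should print TRUE)
all(rowSums(table(d$SOURCEID, sel) > 0) == 1)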
33 changes: 23 additions & 10 deletions grids/models/HISTPR_SoilGrids.R
@@ -97,7 +97,7 @@ wrapper.OCSTHA <- function(i, in.path, n.lst=c("ORCDRC","BLD.f","CRFVOL"), ORCDR

## derive OCS from organic carbon density (and depth to bedrock) -----
## this formula averages between two estimates
wrapper.ocd2OCSTHA <- function(i, in.path, BDR.lst=c("BDRICM","BDRLOG","BDTICM"), sdepth = c(0, 5, 15, 30, 60, 100, 200), st = c(30,100,200), n.lst=c("ORCDRC","BLD.f","CRFVOL"), ORCDRC.sd=20, BLD.sd=100, CRFVOL.sd=5){
wrapper.ocd2OCSTHA <- function(i, in.path, BDR.lst=c("BDRICM","BDRLOG","BDTICM"), sdepth = c(0, 5, 15, 30, 60, 100, 200), st = c(30,100,200), n.lst=c("ORCDRC","BLD.f","CRFVOL"), ORCDRC.sd=20, BLD.sd=100, CRFVOL.sd=5, bias.correction=TRUE, lm.CV_ORCDRC=NULL, lm.CV_OCDENS=NULL){
out.all <- paste0(in.path, "/", i, "/OCSTHA_M_sd", 1:6, "_", i,".tif")
if(any(!file.exists(out.all))){
## depth to bedrock maps:
@@ -120,9 +120,15 @@ wrapper.ocd2OCSTHA <- function(i, in.path, BDR.lst=c("BDRICM","BDRLOG","BDTICM")
s0$OC <- rowMeans(s0@data[,grep("ORCDRC", names(s0))], na.rm = TRUE)
s0$BD <- rowMeans(s0@data[,grep("BLD.f", names(s0))], na.rm = TRUE)
s0$CF <- rowMeans(s0@data[,grep("CRFVOL", names(s0))], na.rm = TRUE)
if(bias.correction==TRUE){
ocd = rowMeans(s@data[,paste0("OCDENS_M_sl", d:(d+1), "_", i)], na.rm=TRUE)
ocd = expm1(predict(lm.CV_OCDENS, data.frame(Predicted=ocd/10)))*10
soc = s0$OC
soc = expm1(predict(lm.CV_ORCDRC, data.frame(Predicted=soc)))
} else {
## without bias correction, use the uncorrected predictions directly
ocd = rowMeans(s@data[,paste0("OCDENS_M_sl", d:(d+1), "_", i)], na.rm=TRUE)
soc = s0$OC
}
## tons per ha (average between OCS based on OCD and based on the OCS formula):
v1 <- round(rowMeans(s@data[,paste0("OCDENS_M_sl", d:(d+1), "_", i)], na.rm=TRUE)*(sdepth[d+1]-sdepth[d])/100)
v2 <- round(as.vector(OCSKGM(ORCDRC=s0$OC, BLD=s0$BD, CRFVOL=s0$CF, HSIZE=(sdepth[d+1]-sdepth[d]), ORCDRC.sd=ORCDRC.sd, BLD.sd=BLD.sd, CRFVOL.sd=CRFVOL.sd)*10))
v1 <- round(ifelse(ocd<0,0,ocd)*(sdepth[d+1]-sdepth[d])/100)
v2 <- round(as.vector(GSIF::OCSKGM(ORCDRC=ifelse(soc<0,0,soc), BLD=s0$BD, CRFVOL=s0$CF, HSIZE=sdepth[d+1]-sdepth[d], ORCDRC.sd=ORCDRC.sd, BLD.sd=BLD.sd, CRFVOL.sd=CRFVOL.sd)*10))
v = (v1+v2[s@grid.index])/2
## Correct (reduce OCS) for depth to bedrock:
s@data[,paste0("OCSTHA_",d)] <- ifelse(sD@data[s@grid.index,"BDRICM"] > sdepth[d+1], v, ifelse(sD@data[s@grid.index,"BDRICM"] > sdepth[d], v*(sD@data[s@grid.index,"BDRICM"]-sdepth[d])/(sdepth[d+1]-sdepth[d]), 0))
@@ -146,22 +152,24 @@ wrapper.ocd2OCSTHA <- function(i, in.path, BDR.lst=c("BDRICM","BDRLOG","BDTICM")
}

#wrapper.ocd2OCSTHA(i="T38715", in.path="/data/tt/SoilGrids250m/predicted250m")
wrapper.ocd2OCSTHA(i="T40858", in.path="/data/tt/SoilGrids250m/predicted250m", lm.CV_ORCDRC=lm.CV_ORCDRC, lm.CV_OCDENS=lm.CV_OCDENS)
#wrapper.ocd2OCSTHA(i="T10410", in.path="/data/tt/SoilGrids250m/predicted250m")
#wrapper.ocd2OCSTHA(i="T34387", in.path="/data/tt/SoilGrids250m/predicted250m")

## Run in parallel:
pr.dirs <- basename(list.dirs("/data/tt/SoilGrids250m/predicted250m")[-1])

sfInit(parallel=TRUE, cpus=48)
sfExport("wrapper.ocd2OCSTHA")
library(snowfall)
sfInit(parallel=TRUE, cpus=24)
sfExport("wrapper.ocd2OCSTHA", "lm.CV_ORCDRC", "lm.CV_OCDENS", "pr.dirs")
sfLibrary(raster)
sfLibrary(rgdal)
sfLibrary(GSIF)
out <- sfClusterApplyLB(pr.dirs, function(i){try( wrapper.ocd2OCSTHA(i, in.path="/data/tt/SoilGrids250m/predicted250m") )})
out <- sfClusterApplyLB(pr.dirs, function(i){try( wrapper.ocd2OCSTHA(i, in.path="/data/tt/SoilGrids250m/predicted250m", lm.CV_ORCDRC=lm.CV_ORCDRC, lm.CV_OCDENS=lm.CV_OCDENS) )})
sfStop()

sfInit(parallel=TRUE, cpus=48)
sfExport("histosol.prob", "fao.lst", "usda.lst")
sfInit(parallel=TRUE, cpus=24)
sfExport("histosol.prob", "fao.lst", "usda.lst", "pr.dirs")
sfLibrary(raster)
sfLibrary(rgdal)
out <- sfClusterApplyLB(pr.dirs, function(i){try( histosol.prob(i, in.path="/data/tt/SoilGrids250m/predicted250m", fao.lst, usda.lst) )})
@@ -173,5 +181,10 @@ sfStop()
# unlink(del.lst)
# }

#del.lst <- list.files(path="/data/tt/SoilGrids250m/predicted250m", pattern=glob2rx(paste0("^OCSTHA*.tif")), full.names=TRUE, recursive=TRUE)
#unlink(del.lst)
#del.lst <- list.files(path="/data/tt/SoilGrids250m/predicted250m", pattern=glob2rx("^OCSTHA*.tif$"), full.names=TRUE, recursive=TRUE)
#x.t = as.vector(unlist(parallel::mclapply(del.lst, function(x){file.info(x)$ctime}, mc.cores=24)))
#summary(x.t<unclass(as.POSIXct("2018-01-20")))
#unlink(del.lst[x.t<unclass(as.POSIXct("2018-01-20"))])

#del.lst = list.files(path="/data/tt/SoilGrids250m/predicted250m", pattern=glob2rx(paste0("^OCSTHA_M_sd1_*.tif$")), full.names=TRUE, recursive=TRUE)
#str(del.lst)
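For orientation (not part of the committed diff): wrapper.ocd2OCSTHA above averages two OCS estimates per depth interval, one derived from the predicted organic carbon density layers (v1) and one from the OCSKGM-style formula applied to ORCDRC, BLD.f and CRFVOL (v2). A back-of-the-envelope sketch of that arithmetic, ignoring the error propagation that GSIF::OCSKGM adds and using made-up input values, is:

ocd <- 45      # organic carbon density, kg / cubic-m (hypothetical)
orc <- 20      # organic carbon content, permille (hypothetical)
bld <- 1350    # bulk density, kg / cubic-m (hypothetical)
crf <- 5       # coarse fragments, percent by volume (hypothetical)
hsize <- 30    # layer thickness, cm
## estimate 1: density x thickness (kg / sq-m), converted to t / ha:
v1 <- ocd * (hsize / 100) * 10
## estimate 2: OCSKGM-style mass balance (kg / sq-m), converted to t / ha:
v2 <- orc / 1000 * hsize / 100 * bld * (1 - crf / 100) * 10
round(c(v1 = v1, v2 = v2, averaged = (v1 + v2) / 2), 1)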
53 changes: 42 additions & 11 deletions grids/models/SPROPS/m.SPROPS.R
@@ -176,7 +176,19 @@ View(ovA[which(ovA$OCDENS>200),c("SOURCEID","BLD","ORCDRC","BLD.f","OCDENS")])
hexbinplot(log1p(ovA$OCDENS)~log1p(ovA$ORCDRC), colramp=colorRampPalette(pal), xlab="log - Organic carbon (permille)", ylab="log - Organic carbon density (kg/cubic-m)", type="g", lwd=1, lcex=8, inner=.2, cex.labels=.8, xbins=30, ybins=30, panel=pfun, colorcut=c(0,0.01,0.03,0.07,0.15,0.25,0.5,0.75,1))
## threshold at ca 12%
expm1(4.8)
## Average ORC, BLD per soil type:

## Litter layer:
sel.O = grep("O", ovA$HZDTXT, ignore.case = FALSE)
#length(sel.O)
#[1] 2104
O.pnts = ovA[sel.O,c("SOURCEID", "SOURCEDB", "LONWGS84", "LATWGS84", "HZDTXT", "UHDICM", "LHDICM", "BLD.f", "ORCDRC")]
O.pnts = O.pnts[!is.na(O.pnts$LONWGS84),]
coordinates(O.pnts) = ~ LONWGS84+LATWGS84
proj4string(O.pnts) = proj4string(SPROPS.pnts)
plotKML(O.pnts["ORCDRC"], folder.name="Litter layer", file.name="Litter_observed.kml")
zip(zipfile="Litter_observed.kmz", files="Litter_observed.kml", zip="zip")

## Average ORC, BLD per soil type ----
s.ovA = plyr::join(ovA[,c("SOURCEID", "SOURCEDB", "TIMESTRR", "LONWGS84", "LATWGS84", "HZDTXT", "UHDICM", "LHDICM", "BLD", "BLD.f", "ORCDRC", "OCDENS", "CLYPPT", "SNDPPT", "PHIHOX", "DEPTH.f")], data.frame(SOURCEID=SPROPS.pnts$SOURCEID, TAXOUSDA=as.factor(ov.tax)))
sum.dfsB = plyr::ddply(s.ovA, .(TAXOUSDA), summarize, M_ORC_30=round(mean(ORCDRC[DEPTH.f<30], na.rm=TRUE)), M_ORC_100=round(mean(ORCDRC[DEPTH.f<100], na.rm=TRUE)), M_BLD_30=round(mean(BLD.f[DEPTH.f<30], na.rm=TRUE)), M_BLD_100=round(mean(BLD.f[DEPTH.f<100], na.rm=TRUE)), M_OCD_30=round(mean(OCDENS[DEPTH.f<30], na.rm=TRUE)), N_horizons=sum(!is.na(ORCDRC)))
TAXOUSDA.leg <- read.csv("../TAXOUSDA/TAXOUSDA_legend.csv")
@@ -520,36 +532,55 @@ dev.off()

## Cross-validation 10-fold (TH: this does not take into account high spatial clustering):

source("../../cv/cv_functions.R")
## Cross-validation 10-fold ----
## TH: this does not take into account high spatial clustering

source("/data/cv/cv_functions.R")
cat("Results of Cross-validation:\n\n", file="resultsCV.txt")
cv_lst <- rep(list(NULL), length(t.vars))
for(j in 1:length(t.vars)){
if(!file.exists(paste0("CV_", t.vars[j], ".rda"))){
if(!file.exists(paste0("CV_", t.vars[j], ".rds"))){
cat(paste("Variable:", all.vars(formulaString.lst[[j]])[1]), file="resultsCV.txt", append=TRUE)
cat("\n", file="resultsCV.txt", append=TRUE)
cv_lst[[j]] <- cv_numeric(formulaString.lst[[j]], rmatrix=ovA[complete.cases(ovA[,all.vars(formulaString.lst[[j]])]),], nfold=10, idcol="SOURCEID", Log=TRUE)
cv_lst[[j]] <- cv_numeric(formulaString.lst[[j]], rmatrix=ovA[complete.cases(ovA[,all.vars(formulaString.lst[[j]])]),], nfold=10, cpus=1, idcol="SOURCEID", Log=TRUE)
sink(file="resultsCV.txt", append=TRUE, type="output")
print(cv_lst[[j]]$Summary)
cat("\n", file="resultsCV.txt", append=TRUE)
sink()
assign(paste0("CV_", t.vars[j]), cv_lst[[j]])
save(list=paste0("CV_", t.vars[j]), file=paste0("CV_", t.vars[j], ".rda"))
saveRDS.gz(cv_lst[[j]], paste0("CV_", t.vars[j], ".rds"))
}
}

## correlation plots:
source("../plot_hexbin.R")
plt.names <- c("SOC in g/kg", "Soil pH x 10 in H2O", "Soil pH x 10 in KCl", "Coarse fragments in %vol", "Sand fraction in %", "Silt fraction in %", "Clay fraction in %", "Bulk density (FE) in kg / m3", "CEC soil in cmolc/kg")
source("/data/models/plot_hexbin.R")
plt.names <- c("SOC in g/kg", "Soil pH x 10 in H2O", "Soil pH x 10 in KCl", "Coarse fragments in %vol", "Sand fraction in %", "Silt fraction in %", "Clay fraction in %", "Bulk density (FE) in kg / m3", "CEC soil in cmolc/kg", "Organic carbon density in kg / m3")
names(plt.names) = t.vars
breaks.lst <- list(c(0,5,10,seq(20,1000,length=47)), seq(2.5,9.5,length=50), seq(2.5,9.5,length=50), c(0,1,2,5,seq(8,100,length=46)), seq(0,100,length=50), seq(0,100,length=50), seq(0,100,length=50), seq(450,2200,length=50), c(0,1,2,5,seq(8,450,length=26)))
breaks.lst <- list(c(0,5,10,seq(20,1000,length=47)), seq(2.5,9.5,length=50), seq(2.5,9.5,length=50), c(0,1,2,5,seq(8,100,length=46)), seq(0,100,length=50), seq(0,100,length=50), seq(0,100,length=50), seq(450,2200,length=50), c(0,1,2,5,seq(8,450,length=26)), seq(0,1250,length=20))
names(breaks.lst) = t.vars
plt.log <- c(TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, TRUE)
plt.log <- c(TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE)
names(plt.log) = t.vars

for(j in 1:length(t.vars)){
plot_hexbin(j, breaks.lst[[t.vars[j]]], main=plt.names[t.vars[j]], in.file=paste0("CV_", t.vars[j], ".rda"), log.plot=plt.log[t.vars[j]])
plot_hexbin(t.vars[j], breaks.lst[[t.vars[j]]], main=plt.names[t.vars[j]], in.file=paste0("CV_", t.vars[j], ".rds"), log.plot=plt.log[t.vars[j]])
}

## Bias correction ----
mO <- readRDS("CV_ORCDRC.rds")
mO.s = mO$CV_residuals[sample.int(n=nrow(mO$CV_residuals), size=6e4),]
x = mO$CV_residuals$Predicted/mO$CV_residuals$Observed
mean(x[!x>20], na.rm=TRUE)
#lm.CV_ORCDRC = loess(log1p(Observed)~log1p(Predicted), mO.s, span=.8, control=loess.control(surface="direct"))
lm.CV_ORCDRC = lm(log1p(Observed)~ poly(log1p(Predicted), 3), mO$CV_residuals)
expm1(predict(lm.CV_ORCDRC, data.frame(Predicted=c(0,20,80,180,350,450))))
saveRDS(lm.CV_ORCDRC, "lm.CV_ORCDRC.rds")
mD <- readRDS("CV_OCDENS.rds")
mD.s = mD$CV_residuals[sample.int(n=nrow(mD$CV_residuals), size=6e4),]
#lm.CV_OCDENS = loess(log1p(Observed)~log1p(Predicted), mD.s, span=.8, control=loess.control(surface="direct"))
lm.CV_OCDENS = lm(log1p(Observed)~ poly(log1p(Predicted), 3), mD$CV_residuals)
expm1(predict(lm.CV_OCDENS, data.frame(Predicted=c(0,25,75,180,350,550))))
saveRDS(lm.CV_OCDENS, "lm.CV_OCDENS.rds")
save.image()
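For context (not part of the committed diff): the bias correction above fits a cubic polynomial on the log1p scale of the cross-validation residual table and back-transforms with expm1 at prediction time. A self-contained sketch on synthetic data follows; the column names mimic CV_residuals, the numbers are made up.

set.seed(42)
Predicted <- rexp(5000, rate = 1/30)
Observed <- expm1(1.05 * log1p(Predicted) + rnorm(5000, 0, 0.3))  # deliberately biased synthetic "truth"
bc <- lm(log1p(Observed) ~ poly(log1p(Predicted), 3))
## apply the correction to a few new predictions:
round(expm1(predict(bc, data.frame(Predicted = c(0, 20, 80, 180, 350)))), 1)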

## Comparison with independent point data not used for model building:
load("/data/profs/SPROPS/tmp/SPROPS.NPDB_V2.rda")
SPROPS.NPDB_V2$LOC_ID = as.factor(paste("ID", SPROPS.NPDB_V2$LONWGS84, SPROPS.NPDB_V2$LATWGS84, sep="_"))
Binary file added grids/models/SPROPS/plot_CV_BLD.f.png
Binary file modified grids/models/SPROPS/plot_CV_CECSUM.png
Binary file modified grids/models/SPROPS/plot_CV_CLYPPT.png
Binary file modified grids/models/SPROPS/plot_CV_CRFVOL.png
Binary file added grids/models/SPROPS/plot_CV_OCDENS.png
Binary file modified grids/models/SPROPS/plot_CV_ORCDRC.png
Binary file modified grids/models/SPROPS/plot_CV_PHIHOX.png
Binary file modified grids/models/SPROPS/plot_CV_PHIKCL.png
Binary file modified grids/models/SPROPS/plot_CV_SLTPPT.png
Binary file modified grids/models/SPROPS/plot_CV_SNDPPT.png
38 changes: 21 additions & 17 deletions grids/models/SPROPS/resultsCV.txt
@@ -1,38 +1,42 @@
Results of Cross-validation:

Variable: ORCDRC
ME MAE RMSE R.squared logRMSE logR.squared
1 -0.2918549 10.20963 32.79371 0.6354696 0.7152194 0.6878568
ME MAE RMSE R.squared logRMSE logR.squared
1 -0.3170407 14.9308 44.30831 0.6176974 0.9240491 0.5685518

Variable: PHIHOX
ME MAE RMSE R.squared logRMSE logR.squared
1 -0.002102178 0.3840512 0.544603 0.8340664 0.0766682 0.8243564
ME MAE RMSE R.squared logRMSE logR.squared
1 -0.006049691 0.5563696 0.7482971 0.6829156 0.1047502 0.6689102

Variable: PHIKCL
ME MAE RMSE R.squared logRMSE logR.squared
1 0.001848172 0.4270301 0.600106 0.7672543 0.0982657 0.7573911
ME MAE RMSE R.squared logRMSE logR.squared
1 -0.01369459 0.6075187 0.8124073 0.5343984 0.1329126 0.5217582

Variable: CRFVOL
ME MAE RMSE R.squared logRMSE logR.squared
1 -0.1039682 5.51777 10.91137 0.5589506 0.9074957 0.6429036
ME MAE RMSE R.squared logRMSE logR.squared
1 -0.4048594 8.095012 13.67336 0.3168269 1.286312 0.3617819

Variable: SNDPPT
ME MAE RMSE R.squared logRMSE logR.squared
1 -0.03690913 8.959922 13.10269 0.7859223 0.5304159 0.7404695
ME MAE RMSE R.squared logRMSE logR.squared
1 -0.3712353 14.57741 19.22129 0.5411525 0.765288 0.4731686

Variable: SLTPPT
ME MAE RMSE R.squared logRMSE logR.squared
1 0.0230658 6.748475 9.793534 0.7936035 0.4389706 0.7494746
1 0.1907141 9.832726 13.29313 0.6185066 0.5866695 0.5616901

Variable: CLYPPT
ME MAE RMSE R.squared logRMSE logR.squared
1 -0.102017 6.631797 9.546816 0.7261315 0.5040807 0.6991649
ME MAE RMSE R.squared logRMSE logR.squared
1 -0.1895747 9.588799 13.28649 0.4588173 0.6844829 0.4782277

Variable: BLD
Variable: BLD.f
ME MAE RMSE R.squared logRMSE logR.squared
1 -1.57435 108.2753 164.7266 0.7577097 0.2265415 0.6352354
1 3.041192 169.0642 245.6155 0.6943449 0.3648066 0.6548445

Variable: CECSUM
ME MAE RMSE R.squared logRMSE logR.squared
1 -0.07096421 5.487295 10.31807 0.6449859 0.4831224 0.670228
ME MAE RMSE R.squared logRMSE logR.squared
1 -0.3537095 7.182843 12.41247 0.436511 0.6156602 0.4737656

Variable: OCDENS
ME MAE RMSE R.squared logRMSE logR.squared
1 -0.6257806 11.0908 27.21392 0.4491045 0.9156944 0.6225582

6 changes: 4 additions & 2 deletions grids/models/plot_hexbin.R
@@ -22,10 +22,12 @@ pfun <- function(x,y, ...){
}

plot_hexbin <- function(varn, breaks, main, colorcut=c(0,0.01,0.03,0.07,0.15,0.25,0.5,0.75,1), pal=R_pal[["bpy_colors"]][1:18], in.file, log.plot, out.file){
require("hexbin"); require("plotKML"); require("latticeExtra")
if(missing(out.file)){ out.file = paste0("plot_CV_", varn, ".png") }
if(!file.exists(out.file)){
load(in.file)
assign("m", get(paste0("CV_", varn)))
#load(in.file)
#assign("m", get(paste0("CV_", varn)))
m <- readRDS(in.file)
d.meas <- min(m[[1]]$Observed, na.rm=TRUE)
pred <- m[[1]]$Predicted
meas <- m[[1]]$Observed
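Side note (not part of the committed diff): the switch from load()/get() to readRDS() in plot_hexbin avoids the name lookup, since saveRDS()/readRDS() serialise a single anonymous object while save()/load() restore objects under their original names. A minimal illustration, using a throwaway object x:

x <- list(Summary = data.frame(RMSE = 1.2))
save(x, file = "tmp.rda"); rm(x)
load("tmp.rda")           # re-creates the object under its original name `x`
saveRDS(x, "tmp.rds")
m <- readRDS("tmp.rds")   # caller chooses the name; no get()/assign() needed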
