From 01b7088b67163b5f1e37328ceb83442499186805 Mon Sep 17 00:00:00 2001
From: Christophe Dutang
Date: Thu, 22 Feb 2024 21:33:54 +0100
Subject: [PATCH] cosmetic change

---
 vignettes/Optimalgo.Rmd | 40 ++++++++++++++++++++++++----------------
 1 file changed, 24 insertions(+), 16 deletions(-)

diff --git a/vignettes/Optimalgo.Rmd b/vignettes/Optimalgo.Rmd
index 9d1bfabd..8c611183 100644
--- a/vignettes/Optimalgo.Rmd
+++ b/vignettes/Optimalgo.Rmd
@@ -203,7 +203,7 @@ grlnlbeta(c(3, 4), x) #test
 hist(x, prob=TRUE)
 lines(density(x), col="red")
 curve(dbeta(x, 3, 3/4), col="green", add=TRUE)
-legend("topleft", lty=1, col=c("red","green"), leg=c("empirical", "theoretical"))
+legend("topleft", lty=1, col=c("red","green"), legend=c("empirical", "theoretical"), bty="n")
 ```
 
 ## Fit Beta distribution
@@ -253,18 +253,23 @@ Results are displayed in the following tables:
 (4) the log-transformed parametrization with the (true) gradient (`-G` stands for gradient).
 
 ```{r, results='asis', echo=FALSE}
-kable(unconstropt[, grep("G-", colnames(unconstropt), invert=TRUE)], digits=3)
+kable(unconstropt[, grep("G-", colnames(unconstropt), invert=TRUE)], digits=3,
+      caption="Unconstrained optimization with approximated gradient")
 ```
 
 ```{r, results='asis', echo=FALSE}
-kable(unconstropt[, grep("G-", colnames(unconstropt))], digits=3)
+kable(unconstropt[, grep("G-", colnames(unconstropt))], digits=3,
+      caption="Unconstrained optimization with true gradient")
 ```
 
 ```{r, results='asis', echo=FALSE}
-kable(expopt[, grep("G-", colnames(expopt), invert=TRUE)], digits=3)
+kable(expopt[, grep("G-", colnames(expopt), invert=TRUE)], digits=3,
+      caption="Exponential trick optimization with approximated gradient")
 ```
+
 ```{r, results='asis', echo=FALSE}
-kable(expopt[, grep("G-", colnames(expopt))], digits=3)
+kable(expopt[, grep("G-", colnames(expopt))], digits=3,
+      caption="Exponential trick optimization with true gradient")
 ```
 
 
@@ -343,7 +348,7 @@ grlnlNB <- function(x, obs, ...)
 ## Random generation of a sample
 
 ```{r, fig.height=4, fig.width=4}
-#(1) beta distribution
+#(2) negative binomial distribution
 n <- 200
 trueval <- c("size"=10, "prob"=3/4, "mu"=10/3)
 x <- rnbinom(n, trueval["size"], trueval["prob"])
@@ -352,8 +357,8 @@ hist(x, prob=TRUE, ylim=c(0, .3))
 lines(density(x), col="red")
 points(min(x):max(x), dnbinom(min(x):max(x), trueval["size"], trueval["prob"]),
        col = "green")
-legend("topleft", lty = 1, col = c("red", "green"),
-       leg = c("empirical", "theoretical"))
+legend("topright", lty = 1, col = c("red", "green"),
+       legend = c("empirical", "theoretical"), bty = "n")
 ```
 
 ## Fit a negative binomial distribution
@@ -404,19 +409,23 @@ Results are displayed in the following tables:
 (3) the log-transformed parametrization without specifying the gradient,
 (4) the log-transformed parametrization with the (true) gradient (`-G` stands for gradient).
 
 ```{r, results='asis', echo=FALSE}
-kable(unconstropt[, grep("G-", colnames(unconstropt), invert=TRUE)], digits=3)
+kable(unconstropt[, grep("G-", colnames(unconstropt), invert=TRUE)], digits=3,
+      caption="Unconstrained optimization with approximated gradient")
 ```
 
 ```{r, results='asis', echo=FALSE}
-kable(unconstropt[, grep("G-", colnames(unconstropt))], digits=3)
+kable(unconstropt[, grep("G-", colnames(unconstropt))], digits=3,
+      caption="Unconstrained optimization with true gradient")
 ```
 
 ```{r, results='asis', echo=FALSE}
-kable(expopt[, grep("G-", colnames(expopt), invert=TRUE)], digits=3)
+kable(expopt[, grep("G-", colnames(expopt), invert=TRUE)], digits=3,
+      caption="Exponential trick optimization with approximated gradient")
 ```
 ```{r, results='asis', echo=FALSE}
-kable(expopt[, grep("G-", colnames(expopt))], digits=3)
+kable(expopt[, grep("G-", colnames(expopt))], digits=3,
+      caption="Exponential trick optimization with true gradient")
 ```
 
 
@@ -436,8 +445,7 @@ We can simulate bootstrap replicates using the `bootdist` function.
 b1 <- bootdist(fitdist(x, "nbinom", method = "mle", optim.method = "BFGS"),
                niter = 100, parallel = "snow", ncpus = 2)
 summary(b1)
-plot(b1)
-abline(v = trueval["size"], h = trueval["mu"], col = "red", lwd = 1.5)
+plot(b1, trueval = trueval)
 ```
 
 
@@ -445,7 +453,7 @@
 # Conclusion
 
 Based on the two previous examples, we observe that all methods converge to the same
-point. This is rassuring.
+point. This is reassuring.
 However, the number of function evaluations (and the gradient evaluations)
-is very different from a method to another.
+varies greatly from one method to another.
 Furthermore, specifying the true gradient of the log-likelihood does not
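
Two recurring ideas in the patched vignette may be worth a quick illustration for reviewers. First, the new table captions distinguish plain optimization from the "exponential trick", i.e. optimizing over log-parameters so that positivity constraints vanish. Below is a minimal sketch of that trick for the beta example; `lnl`, the starting values, and the seed are illustrative stand-ins, not the vignette's actual code.

```r
# Sketch of the "exponential trick": optimize the beta negative log-likelihood
# over eta = log(theta), so both shape parameters stay positive without
# box constraints. `lnl` is an illustrative stand-in, not the vignette's code.
lnl <- function(par, obs) -sum(dbeta(obs, par[1], par[2], log = TRUE))
lnl.exp <- function(eta, obs) lnl(exp(eta), obs)  # unconstrained in eta

set.seed(1234)
obs <- rbeta(1000, 3, 3/4)   # same shape parameters as the vignette's sample
res <- optim(log(c(1, 1)), lnl.exp, obs = obs, method = "BFGS")
exp(res$par)                 # back-transform to the original parametrization
```

Comparing `res$counts` for this run against a direct optimization of `lnl` is essentially the comparison the "exponential trick" table columns report.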
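Second, the bootstrap hunk replaces the manual `abline` overlay with the `trueval` argument of the `plot` method for `bootdist` objects, as used in the diff itself. A self-contained sketch of the patched workflow, reusing the calls that appear in the hunks (the seed is an illustrative addition):

```r
# Sketch of the patched bootstrap workflow for the negative binomial example.
# plot(b1, trueval = ...) overlays the true parameter values on the bootstrap
# plot, replacing the abline() overlay removed by the patch.
library(fitdistrplus)

set.seed(1234)   # illustrative seed, not in the patch
n <- 200
trueval <- c("size" = 10, "prob" = 3/4, "mu" = 10/3)
x <- rnbinom(n, trueval["size"], trueval["prob"])

f1 <- fitdist(x, "nbinom", method = "mle", optim.method = "BFGS")
b1 <- bootdist(f1, niter = 100, parallel = "snow", ncpus = 2)
summary(b1)
plot(b1, trueval = trueval)
```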