Linear regression and leave-one-out cross-validation. See Chapter 11 in Regression and Other Stories.


Load packages

library("rprojroot")   # locate the project root via a marker file
root<-has_file(".ROS-Examples-root")$make_fix_file()
library("rstanarm")    # stan_glm for Bayesian regression
library("loo")         # PSIS-LOO cross-validation
library("foreign")
# for reproducibility
SEED <- 1507

Load children's test scores data

# Read the kidiq data (one row per child: test score plus maternal covariates)
kidiq <- read.csv(root("KidIQ/data","kidiq.csv"))
head(kidiq)
  kid_score mom_hs    mom_iq mom_work mom_age
1        65      1 121.11753        4      27
2        98      1  89.36188        4      25
3        85      1 115.44316        4      27
4        83      1  99.44964        3      25
5       115      1  92.74571        4      27
6        98      0 107.90184        1      18

Linear regression

The option refresh = 0 suppresses the default Stan sampling progress output. This is useful for small data with fast computation. For more complex models and bigger data, it can be useful to see the progress.

# Bayesian linear regression of child test score on maternal high-school
# completion and maternal IQ (rstanarm default priors)
fit_3 <- stan_glm(kid_score ~ mom_hs + mom_iq, data=kidiq,
                  seed=SEED, refresh = 0)
print(fit_3)
stan_glm
 family:       gaussian [identity]
 formula:      kid_score ~ mom_hs + mom_iq
 observations: 434
 predictors:   3
------
            Median MAD_SD
(Intercept) 25.8    5.9  
mom_hs       5.9    2.2  
mom_iq       0.6    0.1  

Auxiliary parameter(s):
      Median MAD_SD
sigma 18.2    0.6  

------
* For help interpreting the printed output see ?print.stanreg
* For info on the priors used see ?prior_summary.stanreg

Estimate the predictive performance of a model

using the within-sample plug-in (i.e., with mean parameters) log score

# Within-sample plug-in log score: evaluate each observation's normal log
# density at the posterior-mean linear predictor and residual scale, then
# sum over observations.
log_dens_plugin <- dnorm(kidiq$kid_score,
                         mean = fitted(fit_3),
                         sd = sigma(fit_3),
                         log = TRUE)
pluginlogscore_3 <- sum(log_dens_plugin)
round(pluginlogscore_3, 1)
[1] -1872

Estimate the predictive performance of a model

using the within-sample posterior predictive (i.e., integrating over parameters) log score

# Within-sample posterior predictive log score: for each observation,
# average the normal density over posterior draws (integrating over the
# parameter uncertainty), take the log, and sum over observations.
sigmas <- as.matrix(fit_3)[, "sigma"]
preds <- posterior_linpred(fit_3)
nsims <- nrow(preds)
# Log density of every observation under every draw (n x nsims matrix).
# vapply (not sapply) guarantees a numeric matrix regardless of input.
ll <- vapply(seq_len(nsims),
             function(i) dnorm(kidiq$kid_score, preds[i, ], sigmas[i], log = TRUE),
             numeric(ncol(preds)))
# log-mean-exp over draws, stabilized by the per-observation maximum so
# tiny densities do not underflow to zero before averaging
max_ll <- apply(ll, 1, max)
logscore_3 <- sum(max_ll + log(rowMeans(exp(ll - max_ll))))
round(logscore_3, 1)
[1] -1872

Add five pure noise predictors to the data

# Append a matrix column of five independent standard-normal "noise"
# predictors, so the regression can be refit with irrelevant inputs.
set.seed(SEED)
n <- nrow(kidiq)
kidiqr <- kidiq
# matrix() fills column-major, identical to array(..., c(n, 5))
kidiqr$noise <- matrix(rnorm(5 * n), nrow = n, ncol = 5)

Linear regression with additional noise predictors

# Same regression as fit_3 plus the five pure-noise predictors
fit_3n <- stan_glm(kid_score ~ mom_hs + mom_iq + noise, data=kidiqr,
                   seed=SEED, refresh = 0)
print(fit_3n)
stan_glm
 family:       gaussian [identity]
 formula:      kid_score ~ mom_hs + mom_iq + noise
 observations: 434
 predictors:   8
------
            Median MAD_SD
(Intercept) 25.4    5.8  
mom_hs       6.0    2.3  
mom_iq       0.6    0.1  
noise1      -0.1    0.9  
noise2      -1.3    0.9  
noise3       0.0    0.9  
noise4       0.2    0.9  
noise5      -0.5    0.9  

Auxiliary parameter(s):
      Median MAD_SD
sigma 18.2    0.6  

------
* For help interpreting the printed output see ?print.stanreg
* For info on the priors used see ?prior_summary.stanreg

Estimate the predictive performance of a model

using the within-sample plug-in (i.e., with mean parameters) log score

# Plug-in log score for the noise-augmented model: normal log density of
# the (unchanged) outcomes at fit_3n's posterior-mean predictions, summed.
log_dens_plugin_n <- dnorm(kidiq$kid_score,
                           mean = fitted(fit_3n),
                           sd = sigma(fit_3n),
                           log = TRUE)
pluginlogscore_3n <- sum(log_dens_plugin_n)
round(pluginlogscore_3n, 1)
[1] -1870.9

Estimate the predictive performance of a model

using the within-sample posterior predictive (i.e., integrating over parameters) log score

# Posterior predictive log score for the noise-augmented model.
sigmas <- as.matrix(fit_3n)[, "sigma"]
preds <- posterior_linpred(fit_3n)
# Recompute the number of draws from THIS model's matrix instead of
# reusing `nsims` left over from the fit_3 chunk (fragile coupling that
# silently breaks if the two fits use different numbers of draws).
nsims_n <- nrow(preds)
ll_n <- vapply(seq_len(nsims_n),
               function(i) dnorm(kidiq$kid_score, preds[i, ], sigmas[i], log = TRUE),
               numeric(ncol(preds)))
# Stabilized log-mean-exp over draws (see logscore_3): avoids underflow
# when individual densities are very small.
max_ll_n <- apply(ll_n, 1, max)
logscore_3n <- sum(max_ll_n + log(rowMeans(exp(ll_n - max_ll_n))))
round(logscore_3n, 1)
[1] -1871.1

Compare models with within-sample plug-in log scores

round(pluginlogscore_3n - pluginlogscore_3, 1)
[1] 1.1

Compare models with within-sample posterior predictive log scores

round(logscore_3n - logscore_3, 1)
[1] 0.9

Compare models with LOO-CV

Estimate the predictive performance of models using LOO-CV

# PSIS-LOO estimate of out-of-sample predictive performance for fit_3
loo_3 <- loo(fit_3)
print(loo_3)

Computed from 4000 by 434 log-likelihood matrix

         Estimate   SE
elpd_loo  -1876.1 14.2
p_loo         4.0  0.4
looic      3752.1 28.5
------
Monte Carlo SE of elpd_loo is 0.0.

All Pareto k estimates are good (k < 0.5).
See help('pareto-k-diagnostic') for details.
# PSIS-LOO for the model with the five noise predictors
loo_3n <- loo(fit_3n)
print(loo_3n)

Computed from 4000 by 434 log-likelihood matrix

         Estimate   SE
elpd_loo  -1879.8 14.2
p_loo         8.7  0.7
looic      3759.6 28.4
------
Monte Carlo SE of elpd_loo is 0.0.

All Pareto k estimates are good (k < 0.5).
See help('pareto-k-diagnostic') for details.
loo_compare(loo_3, loo_3n)
       elpd_diff se_diff
fit_3   0.0       0.0   
fit_3n -3.7       1.5   

Linear regression with different predictors

# Simpler model using only the high-school indicator, plus its LOO estimate
fit_1 <- stan_glm(kid_score ~ mom_hs, data=kidiq,
                  seed = SEED, refresh = 0)
loo_1 <- loo(fit_1)
print(loo_1)

Computed from 4000 by 434 log-likelihood matrix

         Estimate   SE
elpd_loo  -1914.8 13.8
p_loo         3.1  0.3
looic      3829.6 27.6
------
Monte Carlo SE of elpd_loo is 0.0.

All Pareto k estimates are good (k < 0.5).
See help('pareto-k-diagnostic') for details.
loo_compare(loo_3, loo_1)
      elpd_diff se_diff
fit_3   0.0       0.0  
fit_1 -38.7       8.3  

Linear regression with interaction

# Model with the mom_hs x mom_iq interaction added
fit_4 <- stan_glm(kid_score ~ mom_hs + mom_iq + mom_iq:mom_hs,
                  data=kidiq, seed=SEED, refresh = 0)
print(fit_4)
stan_glm
 family:       gaussian [identity]
 formula:      kid_score ~ mom_hs + mom_iq + mom_iq:mom_hs
 observations: 434
 predictors:   4
------
              Median MAD_SD
(Intercept)   -9.8   14.0  
mom_hs        49.5   15.2  
mom_iq         1.0    0.1  
mom_hs:mom_iq -0.5    0.2  

Auxiliary parameter(s):
      Median MAD_SD
sigma 18.0    0.6  

------
* For help interpreting the printed output see ?print.stanreg
* For info on the priors used see ?prior_summary.stanreg
# PSIS-LOO for the interaction model
loo_4 <- loo(fit_4)
print(loo_4)

Computed from 4000 by 434 log-likelihood matrix

         Estimate   SE
elpd_loo  -1872.7 14.4
p_loo         5.0  0.5
looic      3745.3 28.8
------
Monte Carlo SE of elpd_loo is 0.0.

All Pareto k estimates are good (k < 0.5).
See help('pareto-k-diagnostic') for details.
loo_compare(loo_3, loo_4)
      elpd_diff se_diff
fit_4  0.0       0.0   
fit_3 -3.4       2.8   

Linear regression with log-transformation and interaction

# Model with log-transformed maternal IQ and its interaction with mom_hs
fit_5 <- stan_glm(kid_score ~ mom_hs + log(mom_iq) + log(mom_iq):mom_hs,
                  data=kidiq, seed=SEED, refresh = 0)
print(fit_5)
stan_glm
 family:       gaussian [identity]
 formula:      kid_score ~ mom_hs + log(mom_iq) + log(mom_iq):mom_hs
 observations: 434
 predictors:   4
------
                   Median MAD_SD
(Intercept)        -279.9   50.2
mom_hs              118.6   52.3
log(mom_iq)          79.3   11.1
mom_hs:log(mom_iq)  -25.0   11.6

Auxiliary parameter(s):
      Median MAD_SD
sigma 17.9    0.6  

------
* For help interpreting the printed output see ?print.stanreg
* For info on the priors used see ?prior_summary.stanreg
# PSIS-LOO for the log-transformed interaction model
loo_5 <- loo(fit_5)
print(loo_5)

Computed from 4000 by 434 log-likelihood matrix

         Estimate   SE
elpd_loo  -1871.3 14.5
p_loo         4.5  0.4
looic      3742.6 29.0
------
Monte Carlo SE of elpd_loo is 0.0.

All Pareto k estimates are good (k < 0.5).
See help('pareto-k-diagnostic') for details.
loo_compare(loo_3, loo_5)
      elpd_diff se_diff
fit_5  0.0       0.0   
fit_3 -4.8       2.0   

Compare several models

loo_compare(loo_1, loo_3, loo_4, loo_5)
      elpd_diff se_diff
fit_5   0.0       0.0  
fit_4  -1.4       1.3  
fit_3  -4.8       2.0  
fit_1 -43.5       8.9  
LS0tCnRpdGxlOiAiUmVncmVzc2lvbiBhbmQgT3RoZXIgU3RvcmllczogS2lkSVEgY3Jvc3MtdmFsaWRhdGlvbiIKYXV0aG9yOiAiQW5kcmV3IEdlbG1hbiwgSmVubmlmZXIgSGlsbCwgQWtpIFZlaHRhcmkiCmRhdGU6ICJgciBmb3JtYXQoU3lzLkRhdGUoKSlgIgpvdXRwdXQ6CiAgaHRtbF9kb2N1bWVudDoKICAgIHRoZW1lOiByZWFkYWJsZQogICAgdG9jOiB0cnVlCiAgICB0b2NfZGVwdGg6IDIKICAgIHRvY19mbG9hdDogdHJ1ZQogICAgY29kZV9kb3dubG9hZDogdHJ1ZQotLS0KTGluZWFyIHJlZ3Jlc3Npb24gYW5kIGxlYXZlLW9uZS1vdXQgY3Jvc3MtdmFsaWRhdGlvbi4gU2VlIENoYXB0ZXIgMTEgaW4KUmVncmVzc2lvbiBhbmQgT3RoZXIgU3Rvcmllcy4KCi0tLS0tLS0tLS0tLS0KCgpgYGB7ciBzZXR1cCwgaW5jbHVkZT1GQUxTRX0Ka25pdHI6Om9wdHNfY2h1bmskc2V0KG1lc3NhZ2U9RkFMU0UsIGVycm9yPUZBTFNFLCB3YXJuaW5nPUZBTFNFLCBjb21tZW50PU5BKQpgYGAKCiMjIyMgTG9hZCBwYWNrYWdlcwoKYGBge3IgfQpsaWJyYXJ5KCJycHJvanJvb3QiKQpyb290PC1oYXNfZmlsZSgiLlJPUy1FeGFtcGxlcy1yb290IikkbWFrZV9maXhfZmlsZSgpCmxpYnJhcnkoInJzdGFuYXJtIikKbGlicmFyeSgibG9vIikKbGlicmFyeSgiZm9yZWlnbiIpCiMgZm9yIHJlcHJvZHVjYWJpbGl0eQpTRUVEIDwtIDE1MDcKYGBgCgojIyMjIExvYWQgY2hpbGRyZW4ncyB0ZXN0IHNjb3JlcyBkYXRhCgpgYGB7ciB9CmtpZGlxIDwtIHJlYWQuY3N2KHJvb3QoIktpZElRL2RhdGEiLCJraWRpcS5jc3YiKSkKaGVhZChraWRpcSkKYGBgCgojIyBMaW5lYXIgcmVncmVzc2lvbgoKVGhlIG9wdGlvbiBgcmVmcmVzaCA9IDBgIHN1cHJlc3NlcyB0aGUgZGVmYXVsdCBTdGFuIHNhbXBsaW5nCnByb2dyZXNzIG91dHB1dC4gVGhpcyBpcyB1c2VmdWwgZm9yIHNtYWxsIGRhdGEgd2l0aCBmYXN0CmNvbXB1dGF0aW9uLiBGb3IgbW9yZSBjb21wbGV4IG1vZGVscyBhbmQgYmlnZ2VyIGRhdGEsIGl0IGNhbiBiZQp1c2VmdWwgdG8gc2VlIHRoZSBwcm9ncmVzcy4KCmBgYHtyIH0KZml0XzMgPC0gc3Rhbl9nbG0oa2lkX3Njb3JlIH4gbW9tX2hzICsgbW9tX2lxLCBkYXRhPWtpZGlxLAogICAgICAgICAgICAgICAgICBzZWVkPVNFRUQsIHJlZnJlc2ggPSAwKQpwcmludChmaXRfMykKYGBgCgojIyMjIEVzdGltYXRlIHRoZSBwcmVkaWN0aXZlIHBlcmZvcm1hbmNlIG9mIGEgbW9kZWwKdXNpbmcgd2l0aGluLXNhbXBsZSBwbHVnLWluIChpZSB3aXRoIG1lYW4gcGFyYW1ldGVycykgbG9nLXNjb3JlCgpgYGB7ciB9CnBsdWdpbmxvZ3Njb3JlXzMgPC0gc3VtKGRub3JtKGtpZGlxJGtpZF9zY29yZSwgZml0dGVkKGZpdF8zKSwgc2lnbWEoZml0XzMpLCBsb2cgPSBUUlVFKSkKcm91bmQocGx1Z2lubG9nc2NvcmVfMywgMSkKYGBgCgojIyMjIEVzdGltYXRlIHRoZSBwcmVkaWN0aXZlIHBlcmZvcm1hbmNlIG9mIGEgbW9kZWwKdXNpbmcgd2l0aGluLXNhbXBsZSBwb3N0ZXJpb3Ig
cHJlZGljdGl2ZSAoaWUgaW50ZWdyYXRpbmcgb3ZlciBwYXJhbWV0ZXJzKSBsb2ctc2NvcmUKCmBgYHtyIH0Kc2lnbWFzIDwtIGFzLm1hdHJpeChmaXRfMylbLCdzaWdtYSddCnByZWRzIDwtIHBvc3Rlcmlvcl9saW5wcmVkKGZpdF8zKQpuc2ltcyA8LSBucm93KHByZWRzKQpsb2dzY29yZV8zIDwtIHN1bShsb2cocm93TWVhbnMoc2FwcGx5KDE6bnNpbXMsIEZVTiA9IGZ1bmN0aW9uKGkpIGRub3JtKGtpZGlxJGtpZF9zY29yZSwgcHJlZHNbaSxdLCBzaWdtYXNbaV0sIGxvZz1GQUxTRSkpKSkpCnJvdW5kKGxvZ3Njb3JlXzMsIDEpCmBgYAoKIyMgQWRkIGZpdmUgcHVyZSBub2lzZSBwcmVkaWN0b3JzIHRvIHRoZSBkYXRhCgpgYGB7ciB9CnNldC5zZWVkKFNFRUQpCm4gPC0gbnJvdyhraWRpcSkKa2lkaXFyIDwtIGtpZGlxCmtpZGlxciRub2lzZSA8LSBhcnJheShybm9ybSg1Km4pLCBjKG4sNSkpCmBgYAoKIyMjIyBMaW5lYXIgcmVncmVzc2lvbiB3aXRoIGFkZGl0aW9uYWwgbm9pc2UgcHJlZGljdG9ycwoKYGBge3IgfQpmaXRfM24gPC0gc3Rhbl9nbG0oa2lkX3Njb3JlIH4gbW9tX2hzICsgbW9tX2lxICsgbm9pc2UsIGRhdGE9a2lkaXFyLAogICAgICAgICAgICAgICAgICAgc2VlZD1TRUVELCByZWZyZXNoID0gMCkKcHJpbnQoZml0XzNuKQpgYGAKCiMjIyMgRXN0aW1hdGUgdGhlIHByZWRpY3RpdmUgcGVyZm9ybWFuY2Ugb2YgYSBtb2RlbAp1c2luZyB3aXRoaW4tc2FtcGxlIHBsdWctaW4gKGllIHdpdGggbWVhbiBwYXJhbWV0ZXJzKSBsb2ctc2NvcmUKCmBgYHtyIH0KcGx1Z2lubG9nc2NvcmVfM24gPC0gc3VtKGRub3JtKGtpZGlxJGtpZF9zY29yZSwgZml0dGVkKGZpdF8zbiksIHNpZ21hKGZpdF8zbiksIGxvZyA9IFRSVUUpKQpyb3VuZChwbHVnaW5sb2dzY29yZV8zbiwgMSkKYGBgCgojIyMjIEVzdGltYXRlIHRoZSBwcmVkaWN0aXZlIHBlcmZvcm1hbmNlIG9mIGEgbW9kZWwKdXNpbmcgd2l0aGluLXNhbXBsZSBwb3N0ZXJpb3IgcHJlZGljdGl2ZSAoaWUgaW50ZWdyYXRpbmcgb3ZlciBwYXJhbWV0ZXJzKSBsb2ctc2NvcmUKCmBgYHtyIH0Kc2lnbWFzIDwtIGFzLm1hdHJpeChmaXRfM24pWywnc2lnbWEnXQpwcmVkcyA8LSBwb3N0ZXJpb3JfbGlucHJlZChmaXRfM24pCmxvZ3Njb3JlXzNuIDwtIHN1bShsb2cocm93TWVhbnMoc2FwcGx5KDE6bnNpbXMsIEZVTiA9IGZ1bmN0aW9uKGkpIGRub3JtKGtpZGlxJGtpZF9zY29yZSwgcHJlZHNbaSxdLCBzaWdtYXNbaV0sIGxvZz1GQUxTRSkpKSkpCnJvdW5kKGxvZ3Njb3JlXzNuLCAxKQpgYGAKCiMjIyMgQ29tcGFyZSBtb2RlbHMgd2l0aCB3aXRoaW4tc2FtcGxlIHBsdWctaW4gbG9nIHNjb3JlcwoKYGBge3IgfQpyb3VuZChwbHVnaW5sb2dzY29yZV8zbiAtIHBsdWdpbmxvZ3Njb3JlXzMsIDEpCmBgYAoKIyMjIyBDb21wYXJlIG1vZGVscyB3aXRoIHdpdGhpbi1zYW1wbGUgcG9zdGVyaW9yIHByZWRpY3RpdmUgbG9nIHNjb3JlcwoKYGBge3IgfQpyb3VuZChsb2dzY29yZV8zbiAtIGxvZ3Njb3Jl
XzMsIDEpCmBgYAoKIyMgQ29tcGFyZSBtb2RlbHMgd2l0aCBMT08tQ1YKCiMjIyMgRXN0aW1hdGUgdGhlIHByZWRpY3RpdmUgcGVyZm9ybWFuY2Ugb2YgbW9kZWxzIHVzaW5nIExPTy1DVgoKYGBge3IgfQpsb29fMyA8LSBsb28oZml0XzMpCnByaW50KGxvb18zKQpsb29fM24gPC0gbG9vKGZpdF8zbikKcHJpbnQobG9vXzNuKQpsb29fY29tcGFyZShsb29fMywgbG9vXzNuKQpgYGAKCiMjIyMgTGluZWFyIHJlZ3Jlc3Npb24gd2l0aCBkaWZmZXJlbnQgcHJlZGljdG9ycwoKYGBge3IgfQpmaXRfMSA8LSBzdGFuX2dsbShraWRfc2NvcmUgfiBtb21faHMsIGRhdGE9a2lkaXEsCiAgICAgICAgICAgICAgICAgIHNlZWQgPSBTRUVELCByZWZyZXNoID0gMCkKbG9vXzEgPC0gbG9vKGZpdF8xKQpwcmludChsb29fMSkKbG9vX2NvbXBhcmUobG9vXzMsIGxvb18xKQpgYGAKCiMjIyMgTGluZWFyIHJlZ3Jlc3Npb24gd2l0aCBpbnRlcmFjdGlvbgoKYGBge3IgfQpmaXRfNCA8LSBzdGFuX2dsbShraWRfc2NvcmUgfiBtb21faHMgKyBtb21faXEgKyBtb21faXE6bW9tX2hzLAogICAgICAgICAgICAgICAgICBkYXRhPWtpZGlxLCBzZWVkPVNFRUQsIHJlZnJlc2ggPSAwKQpwcmludChmaXRfNCkKbG9vXzQgPC0gbG9vKGZpdF80KQpwcmludChsb29fNCkKbG9vX2NvbXBhcmUobG9vXzMsIGxvb180KQpgYGAKCiMjIyMgTGluZWFyIHJlZ3Jlc3Npb24gd2l0aCBsb2ctdHJhbnNmb3JtYXRpb24gYW5kIGludGVyYWN0aW9uCgpgYGB7ciB9CmZpdF81IDwtIHN0YW5fZ2xtKGtpZF9zY29yZSB+IG1vbV9ocyArIGxvZyhtb21faXEpICsgbG9nKG1vbV9pcSk6bW9tX2hzLAogICAgICAgICAgICAgICAgICBkYXRhPWtpZGlxLCBzZWVkPVNFRUQsIHJlZnJlc2ggPSAwKQpwcmludChmaXRfNSkKbG9vXzUgPC0gbG9vKGZpdF81KQpwcmludChsb29fNSkKbG9vX2NvbXBhcmUobG9vXzMsIGxvb181KQpgYGAKCiMjIyMgQ29tcGFyZSBzZXZlcmFsIG1vZGVscwoKYGBge3IgfQpsb29fY29tcGFyZShsb29fMSwgbG9vXzMsIGxvb180LCBsb29fNSkKYGBgCgo=