Setup

Load packages

# Load packages used throughout: rstan for model fitting, ggplot2 and
# bayesplot for graphics, loo for cross-validation, rprojroot for
# project-relative file paths.
library(rstan)
library(ggplot2)
library(bayesplot)
theme_set(bayesplot::theme_default(base_family = "sans"))
library(loo)
library(rprojroot)
# Path helper anchored at the directory containing the
# .BDA_R_demos_root marker file.
root <- has_file(".BDA_R_demos_root")$make_fix_file()

1 Load and examine data

source(root("demos_rstan/ppc", "count-data.R"))
print(N)
[1] 500
print(y)
  [1] 0 3 5 0 4 7 4 2 3 6 7 0 0 3 7 5 5 0 4 0 4 4 6 3 7 5 3 0 0 2 0 1 0 1 5 4 4
 [38] 2 3 6 4 5 0 7 7 4 4 4 0 6 1 5 6 5 6 7 3 6 2 3 0 2 0 6 6 0 3 4 4 5 5 0 5 7
 [75] 5 5 6 4 2 3 4 6 4 6 6 4 0 6 5 5 7 0 1 6 7 0 5 0 0 5 6 5 1 0 7 1 2 6 5 4 0
[112] 4 0 4 4 6 3 0 0 3 3 4 2 5 3 4 3 2 5 2 4 4 0 2 7 5 7 5 5 7 7 0 4 6 0 4 6 7
[149] 4 0 4 1 5 0 3 5 7 6 0 5 5 6 7 6 7 3 4 3 7 7 2 5 4 5 5 0 6 2 4 5 4 0 0 5 5
[186] 7 7 0 3 0 3 3 6 1 4 2 0 4 7 5 5 0 3 7 0 6 6 4 1 6 7 6 0 3 6 4 7 0 5 5 4 0
[223] 0 2 4 6 0 5 0 2 7 2 7 5 4 6 2 4 0 4 0 0 3 5 4 3 5 5 7 7 0 6 4 5 1 5 3 5 5
[260] 5 0 2 7 6 2 3 2 5 4 7 6 7 3 3 4 4 6 4 6 7 1 5 6 3 3 6 3 4 0 7 0 3 6 5 0 0
[297] 0 5 4 4 0 4 7 5 5 3 3 0 0 5 4 0 7 6 0 6 2 0 6 1 0 4 0 4 3 0 4 5 5 7 6 6 5
[334] 4 7 0 6 4 7 7 5 0 1 4 7 6 4 5 4 7 2 5 2 6 3 2 7 4 3 4 6 6 6 6 7 1 0 0 7 7
[371] 4 2 4 5 5 7 4 1 7 6 5 6 5 4 0 0 7 0 0 5 6 6 3 6 0 0 0 4 4 3 0 7 5 4 2 7 0
[408] 4 0 0 2 4 5 0 4 2 5 2 0 6 6 3 6 0 2 5 0 0 0 6 0 0 6 5 4 6 4 5 5 4 0 3 4 3
[445] 3 5 3 4 5 7 0 0 1 4 6 3 5 7 6 6 5 0 5 4 0 0 2 6 0 6 0 4 5 6 3 4 2 3 4 0 5
[482] 0 0 0 0 3 4 7 6 7 7 3 4 4 7 4 5 2 5 6
qplot(y)
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.

1.1 Plot histogram of draws from Poisson with same mean

# Draw N values from a Poisson with the same mean as the observed counts,
# to compare its shape against the real data.
x <- rpois(N, mean(y))
# NOTE(review): qplot() is deprecated in ggplot2 >= 3.4 -- consider ggplot().
qplot(x)
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.

# Stack the observed counts and the matched-mean Poisson draws into one
# long data frame, then overlay their frequency polygons for comparison.
plotdata <- rbind(
  data.frame(value = y, variable = "Our data"),
  data.frame(value = x, variable = "Poisson data")
)
ggplot(plotdata, aes(x = value, color = variable)) +
  geom_freqpoly(binwidth = 0.5) +
  scale_x_continuous(name = "", breaks = 0:max(x, y)) +
  scale_color_manual(name = "", values = c("gray30", "purple"))

2 Fit basic Poisson model

Even though we already suspect it won’t be a good model for this data, it’s still a good idea to start by fitting the simplest Poisson model. From there we can then identify in which ways the model is inadequate.

# Path to the simple Poisson Stan program; echo its source for the reader.
code_poisson <- root("demos_rstan/ppc", "poisson-simple.stan")
writeLines(readLines(code_poisson))
data {
  int<lower=1> N;
  array[N] int<lower=0> y;
}
parameters {
  real<lower=0> lambda;
}
model {
  lambda ~ exponential(0.2);
  y ~ poisson(lambda);
}
generated quantities {
  array[N] real log_lik;
  array[N] int y_rep;
  for (n in 1 : N) {
    y_rep[n] = poisson_rng(lambda);
    log_lik[n] = poisson_lpmf(y[n] | lambda);
  }
}
# Fit the simple Poisson model (refresh = 0 silences sampler progress)
# and summarize convergence diagnostics for lambda and the log posterior.
fit <- stan(file = code_poisson, data = c("y", "N"), refresh = 0)
monitor(as.array(fit)[,,c("lambda","lp__")])
Inference for the input samples (4 chains: each with iter = 1000; warmup = 500):

          Q5   Q50   Q95  Mean  SD  Rhat Bulk_ESS Tail_ESS
lambda   3.5   3.7   3.8   3.7 0.1     1      863     1060
lp__   544.4 546.0 546.2 545.7 0.7     1      946     1365

For each parameter, Bulk_ESS and Tail_ESS are crude measures of 
effective sample size for bulk and tail quantities respectively (an ESS > 100 
per chain is considered good), and Rhat is the potential scale reduction 
factor on rank normalized split chains (at convergence, Rhat <= 1.05).

2.1 Look at posterior distribution of lambda

# Plot the marginal posterior of lambda with a shaded central interval.
color_scheme_set("brightblue") # check out ?bayesplot::color_scheme_set
lambda_draws <- as.matrix(fit, pars = "lambda")
mcmc_areas(lambda_draws, prob = 0.8) # color 80% interval

2.2 Check summary of lambda and compare to mean of the data

print(fit, pars = "lambda")
Inference for Stan model: anon_model.
4 chains, each with iter=2000; warmup=1000; thin=1; 
post-warmup draws per chain=1000, total post-warmup draws=4000.

       mean se_mean   sd 2.5%  25%  50%  75% 97.5% n_eff Rhat
lambda 3.67       0 0.08 3.51 3.61 3.67 3.72  3.83  1513    1

Samples were drawn using NUTS(diag_e) at Wed Oct 26 15:42:41 2022.
For each parameter, n_eff is a crude measure of effective sample size,
and Rhat is the potential scale reduction factor on split chains (at 
convergence, Rhat=1).
mean(y)
[1] 3.662

The model gets the mean right, but, as we’ll see next, the model is quite bad at predicting the outcome.

3 Graphical posterior predictive checks

3.1 Extract y_rep draws from the fitted model object

# Posterior predictive replications: one replicated data set per draw.
y_rep <- as.matrix(fit, pars = "y_rep")

# number of rows = number of post-warmup posterior draws
# number of columns = length(y)
dim(y_rep) 
[1] 4000  500

3.2 Compare histogram of y to histograms of several y_reps

ppc_hist(y, y_rep[1:8, ], binwidth = 1)

3.3 Compare density estimate of y to density estimates of a bunch of y_reps

ppc_dens_overlay(y, y_rep[1:50, ])

3.4 Rootogram shows the square root of counts

Rootogram shows the square root of counts, with a histogram for the observed data and model predictions (yrep) shown as a ribbon.

ppc_rootogram(y, y_rep)

3.5 Compare proportion of zeros in y to the distribution of that proportion over all y_reps

# Test statistic for ppc_stat(): the proportion of zeros in a vector.
prop_zero <- function(x) {
  mean(x == 0)
}
print(prop_zero(y))
[1] 0.202
ppc_stat(y, y_rep, stat = "prop_zero")

3.6 Some other checks

Look at two statistics in a scatterplot:

ppc_stat_2d(y, y_rep, stat = c("mean", "sd"))

Distributions of predictive errors:

ppc_error_hist(y, y_rep[1:4, ], binwidth = 1) + xlim(-15, 15)
Warning: Removed 8 rows containing missing values (geom_bar).

4 Fit Poisson “hurdle” model (also with truncation from above)

This model says that there is some probability theta that y is zero and probability 1 - theta that y is positive. Conditional on observing a positive y, we use a truncated Poisson

y[n] ~ Poisson(lambda) T[1, U];

where T[1,U] indicates truncation with lower bound 1 and upper bound U, which for simplicity we’ll assume is max(y).

# Path to the Poisson hurdle Stan program; echo its source for the reader.
code_poisson_hurdle <- root("demos_rstan/ppc", "poisson-hurdle.stan")
writeLines(readLines(code_poisson_hurdle))
/* Poisson "hurdle" model (also with upper truncation)
 *
 * Note: for simplicity I assume that there is only one way to obtain
 * a zero, as opposed to some zero-inflated models where there are
 * multiple processes that lead to a zero. So in this example, y is
 * zero with probability theta and y is modeled as a truncated poisson
 * if greater than zero.
 */

data {
  int<lower=1> N;
  array[N] int<lower=0> y;
}
transformed data {
  int U = max(y); // upper truncation point
}
parameters {
  real<lower=0, upper=1> theta; // Pr(y = 0)
  real<lower=0> lambda; // Poisson rate parameter (if y > 0)
}
model {
  lambda ~ exponential(0.2);

  for (n in 1 : N) {
    if (y[n] == 0) {
      target += log(theta); // log(Pr(y = 0))
    } else {
      target += log1m(theta); // log(Pr(y > 0))
      y[n] ~ poisson(lambda) T[1, U]; // truncated poisson
    }
  }
}
generated quantities {
  array[N] real log_lik;
  array[N] int y_rep;
  for (n in 1 : N) {
    if (bernoulli_rng(theta)) {
      y_rep[n] = 0;
    } else {
      // use a while loop because Stan doesn't have truncated RNGs
      int w; // temporary variable
      w = poisson_rng(lambda);
      while (w == 0 || w > U) {
        w = poisson_rng(lambda);
      }

      y_rep[n] = w;
    }
    if (y[n] == 0) {
      log_lik[n] = log(theta);
    } else {
      log_lik[n] = log1m(theta) + poisson_lpmf(y[n] | lambda)
                   - log_diff_exp(poisson_lcdf(U | lambda),
                                  poisson_lcdf(0 | lambda));
    }
  }
}
# Fit the hurdle model and summarize its two parameters.
fit2 <- stan(file = code_poisson_hurdle, data = c("y", "N"), refresh = 0)
print(fit2, pars = c("lambda", "theta"))
Inference for Stan model: anon_model.
4 chains, each with iter=2000; warmup=1000; thin=1; 
post-warmup draws per chain=1000, total post-warmup draws=4000.

       mean se_mean   sd 2.5%  25% 50%  75% 97.5% n_eff Rhat
lambda  5.3       0 0.16 5.00 5.19 5.3 5.41  5.63  3335    1
theta   0.2       0 0.02 0.17 0.19 0.2 0.22  0.24  2839    1

Samples were drawn using NUTS(diag_e) at Wed Oct 26 15:44:34 2022.
For each parameter, n_eff is a crude measure of effective sample size,
and Rhat is the potential scale reduction factor on split chains (at 
convergence, Rhat=1).

4.1 Compare posterior distributions of lambda from the two models

# Put the lambda draws from both fits side by side; the cbind column
# names become the labels in the mcmc_areas plot.
lambda_draws2 <- as.matrix(fit2, pars = "lambda")
lambdas <- cbind(lambda_fit1 = lambda_draws[, 1],
                 lambda_fit2 = lambda_draws2[, 1])
mcmc_areas(lambdas, prob = 0.8) # color 80% interval

5 Posterior predictive checks again

Same plots as before (and a few others), but this time using y_rep from fit2. Everything looks much more reasonable:

# Repeat the posterior predictive checks with replications from the
# hurdle model (fit2).
y_rep2 <- as.matrix(fit2, pars = "y_rep")
ppc_hist(y, y_rep2[1:8, ], binwidth = 1)

ppc_dens_overlay(y, y_rep2[1:50, ])

ppc_rootogram(y, y_rep2)

ppc_stat(y, y_rep2, stat = "prop_zero")

ppc_stat_2d(y, y_rep2, stat = c("mean", "sd"))

# Histogram of predictive errors for 4 randomly selected draws.
ppc_error_hist(y, y_rep2[sample(nrow(y_rep2), 4), ], binwidth = 1)

6 Comparison with leave-one-out cross-validation

Compare predictive performance with LOO.

(loo1 <- loo(fit))

Computed from 4000 by 500 log-likelihood matrix

         Estimate   SE
elpd_loo  -1207.9 17.5
p_loo         1.4  0.1
looic      2415.7 35.0
------
Monte Carlo SE of elpd_loo is 0.0.

All Pareto k estimates are good (k < 0.5).
See help('pareto-k-diagnostic') for details.
(loo2 <- loo(fit2))

Computed from 4000 by 500 log-likelihood matrix

         Estimate   SE
elpd_loo   -992.0  9.3
p_loo         2.1  0.1
looic      1984.1 18.6
------
Monte Carlo SE of elpd_loo is 0.0.

All Pareto k estimates are good (k < 0.5).
See help('pareto-k-diagnostic') for details.
loo_compare(loo1, loo2)
       elpd_diff se_diff
model2    0.0       0.0 
model1 -215.8      22.1 

Model 2 is a clear winner in the predictive performance.

LS0tCnRpdGxlOiAiRGV0ZWN0aW5nIG1vZGVsIG1pc2ZpdCB3aXRoIHBvc3RlcmlvciBwcmVkaWN0aXZlIGNoZWNrcyIKYXV0aG9yOiAiSm9uYWggR2FicnksIEFraSBWZWh0YXJpIgpkYXRlOiAiTGFzdCBtb2RpZmllZCBgciBmb3JtYXQoU3lzLkRhdGUoKSlgLiIKb3V0cHV0OgogIGh0bWxfZG9jdW1lbnQ6CiAgICBmaWdfY2FwdGlvbjogeWVzCiAgICB0b2M6IFRSVUUKICAgIHRvY19kZXB0aDogMgogICAgbnVtYmVyX3NlY3Rpb25zOiBUUlVFCiAgICB0b2NfZmxvYXQ6CiAgICAgIHNtb290aF9zY3JvbGw6IEZBTFNFCiAgICB0aGVtZTogcmVhZGFibGUKICAgIGNvZGVfZG93bmxvYWQ6IHRydWUKLS0tCgojIFNldHVwICB7LnVubnVtYmVyZWR9CgpgYGB7ciBzZXR1cCwgaW5jbHVkZT1GQUxTRX0Ka25pdHI6Om9wdHNfY2h1bmskc2V0KGNhY2hlID0gRkFMU0UsIGVjaG8gPSBUUlVFLCBjb21tZW50PU5BLCBmaWcuYWxpZ249ImNlbnRlciIpCmBgYAoKKipMb2FkIHBhY2thZ2VzKioKYGBge3IgbG9hZC1wYWNrYWdlcywgbWVzc2FnZT1GQUxTRSwgd2FybmluZz1GQUxTRX0KbGlicmFyeShyc3RhbikKbGlicmFyeShnZ3Bsb3QyKQpsaWJyYXJ5KGJheWVzcGxvdCkKdGhlbWVfc2V0KGJheWVzcGxvdDo6dGhlbWVfZGVmYXVsdChiYXNlX2ZhbWlseSA9ICJzYW5zIikpCmxpYnJhcnkobG9vKQpsaWJyYXJ5KHJwcm9qcm9vdCkKcm9vdDwtaGFzX2ZpbGUoIi5CREFfUl9kZW1vc19yb290IikkbWFrZV9maXhfZmlsZSgpCmBgYAoKCiMgTG9hZCBhbmQgZXhhbWluZSBkYXRhCmBgYHtyIHBvaXNzb24tZGF0YSwgZmlnLndpZHRoID0gNCwgZmlnLmhlaWdodCA9IDR9CnNvdXJjZShyb290KCJkZW1vc19yc3Rhbi9wcGMiLCAiY291bnQtZGF0YS5SIikpCnByaW50KE4pCnByaW50KHkpCnFwbG90KHkpCmBgYAoKIyMgUGxvdCBoaXN0b2dyYW0gb2YgZHJhd3MgZnJvbSBQb2lzc29uIHdpdGggc2FtZSBtZWFuCgpgYGB7ciBwbG90LXgsIGZpZy53aWR0aCA9IDQsIGZpZy5oZWlnaHQgPSA0fQp4IDwtIHJwb2lzKE4sIG1lYW4oeSkpCnFwbG90KHgpCmBgYAoKYGBge3IgcGxvdC15LXgsIG1lc3NhZ2U9RkFMU0V9CnBsb3RkYXRhIDwtIGRhdGEuZnJhbWUoCiAgdmFsdWUgPSBjKHksIHgpLCAKICB2YXJpYWJsZSA9IHJlcChjKCJPdXIgZGF0YSIsICJQb2lzc29uIGRhdGEiKSwgZWFjaCA9IE4pCikKZ2dwbG90KHBsb3RkYXRhLCBhZXMoeCA9IHZhbHVlLCBjb2xvciA9IHZhcmlhYmxlKSkgKyAKICBnZW9tX2ZyZXFwb2x5KGJpbndpZHRoID0gMC41KSArCiAgc2NhbGVfeF9jb250aW51b3VzKG5hbWUgPSAiIiwgYnJlYWtzID0gMDptYXgoeCx5KSkgKwogIHNjYWxlX2NvbG9yX21hbnVhbChuYW1lID0gIiIsIHZhbHVlcyA9IGMoImdyYXkzMCIsICJwdXJwbGUiKSkKYGBgCgojIEZpdCBiYXNpYyBQb2lzc29uIG1vZGVsCgpFdmVuIHRob3VnaCB3ZSBhbHJlYWR5IHN1c3BlY3QgaXQgd29uJ3QgYmUgYSBnb29kIG1vZGVsIGZvciB0aGlzIGRhdGEsIGl0J3MKc3RpbGwg
YSBnb29kIGlkZWEgdG8gc3RhcnQgYnkgZml0dGluZyB0aGUgc2ltcGxlc3QgUG9pc3NvbiBtb2RlbC4gRnJvbSB0aGVyZSB3ZQpjYW4gdGhlbiBpZGVudGlmeSBpbiB3aGljaCB3YXlzIHRoZSBtb2RlbCBpcyBpbmFkZXF1YXRlLgpgYGB7cn0KY29kZV9wb2lzc29uIDwtIHJvb3QoImRlbW9zX3JzdGFuL3BwYyIsICJwb2lzc29uLXNpbXBsZS5zdGFuIikKd3JpdGVMaW5lcyhyZWFkTGluZXMoY29kZV9wb2lzc29uKSkKYGBgCgoKYGBge3IsIGZpdH0KZml0IDwtIHN0YW4oZmlsZSA9IGNvZGVfcG9pc3NvbiwgZGF0YSA9IGMoInkiLCAiTiIpLCByZWZyZXNoID0gMCkKbW9uaXRvcihhcy5hcnJheShmaXQpWywsYygibGFtYmRhIiwibHBfXyIpXSkKYGBgCgojIyBMb29rIGF0IHBvc3RlcmlvciBkaXN0cmlidXRpb24gb2YgbGFtYmRhCgpgYGB7ciwgcGxvdC1sYW1iZGF9CmNvbG9yX3NjaGVtZV9zZXQoImJyaWdodGJsdWUiKSAjIGNoZWNrIG91dCA/YmF5ZXNwbG90Ojpjb2xvcl9zY2hlbWVfc2V0CmxhbWJkYV9kcmF3cyA8LSBhcy5tYXRyaXgoZml0LCBwYXJzID0gImxhbWJkYSIpCm1jbWNfYXJlYXMobGFtYmRhX2RyYXdzLCBwcm9iID0gMC44KSAjIGNvbG9yIDgwJSBpbnRlcnZhbApgYGAKCiMjIENoZWNrIHN1bW1hcnkgb2YgbGFtYmRhIGFuZCBjb21wYXJlIHRvIG1lYW4gb2YgdGhlIGRhdGEKYGBge3IsIHByaW50LWZpdH0KcHJpbnQoZml0LCBwYXJzID0gImxhbWJkYSIpCm1lYW4oeSkKYGBgClRoZSBtb2RlbCBnZXRzIHRoZSBtZWFuIHJpZ2h0LCBidXQsIGFzIHdlJ2xsIHNlZSBuZXh0LCB0aGUgbW9kZWwgaXMgcXVpdGUgYmFkCmF0IHByZWRpY3RpbmcgdGhlIG91dGNvbWUuCgojIEdyYXBoaWNhbCBwb3N0ZXJpb3IgcHJlZGljdGl2ZSBjaGVja3MKCiMjIEV4dHJhY3QgYHlfcmVwYCBkcmF3cyBmcm9tIHRoZSBmaXR0ZWQgbW9kZWwgb2JqZWN0CgpgYGB7ciB5X3JlcH0KeV9yZXAgPC0gYXMubWF0cml4KGZpdCwgcGFycyA9ICJ5X3JlcCIpCgojIG51bWJlciBvZiByb3dzID0gbnVtYmVyIG9mIHBvc3Qtd2FybXVwIHBvc3RlcmlvciBkcmF3cwojIG51bWJlciBvZiBjb2x1bW5zID0gbGVuZ3RoKHkpCmRpbSh5X3JlcCkgCmBgYAoKIyMgQ29tcGFyZSBoaXN0b2dyYW0gb2YgYHlgIHRvIGhpc3RvZ3JhbXMgb2Ygc2V2ZXJhbCBgeV9yZXBgcwoKYGBge3IgcHBjLWhpc3QsIG1lc3NhZ2U9RkFMU0V9CnBwY19oaXN0KHksIHlfcmVwWzE6OCwgXSwgYmlud2lkdGggPSAxKQpgYGAKCiMjIENvbXBhcmUgZGVuc2l0eSBlc3RpbWF0ZSBvZiBgeWAgdG8gZGVuc2l0eSBlc3RpbWF0ZXMgb2YgYSBidW5jaCBvZiBgeV9yZXBgcwoKYGBge3IgcHBjLWRlbnMtb3ZlcmxheX0KcHBjX2RlbnNfb3ZlcmxheSh5LCB5X3JlcFsxOjUwLCBdKQpgYGAKCiMjIFJvb3RvZ3JhbSBzaG93cyB0aGUgc3F1YXJlIHJvb3Qgb2YgY291bnRzCgpSb290b2dyYW0gc2hvd3MgdGhlIHNxdWFyZSByb290IG9mIGNvdW50cywgd2l0aCBoaXN0b2dyYW0gZm9yIHRoZSBvYnNlcnZl
ZCBkYXRhIGFuZCBhbmQgbW9kZWwgcHJlZGljdGlvbnMgKHlyZXApIGFzIGEgcmliYm9uCgpgYGB7ciBwcGMtcm9vdG9ncmFtfQpwcGNfcm9vdG9ncmFtKHksIHlfcmVwKQpgYGAKCiMjIENvbXBhcmUgcHJvcG9ydGlvbiBvZiB6ZXJvcyBpbiBgeWAgdG8gdGhlIGRpc3RyaWJ1dGlvbiBvZiB0aGF0IHByb3BvcnRpb24gb3ZlciBhbGwgYHlfcmVwYHMKCmBgYHtyIHByb3AtemVybywgbWVzc2FnZT1GQUxTRX0KcHJvcF96ZXJvIDwtIGZ1bmN0aW9uKHgpIG1lYW4oeCA9PSAwKQpwcmludChwcm9wX3plcm8oeSkpCgpwcGNfc3RhdCh5LCB5X3JlcCwgc3RhdCA9ICJwcm9wX3plcm8iKQpgYGAKCiMjIFNvbWUgb3RoZXIgY2hlY2tzCgpMb29rIGF0IHR3byBzdGF0aXN0aWNzIGluIGEgc2NhdHRlcnBsb3Q6CgpgYGB7ciBzdGF0LTJkfQpwcGNfc3RhdF8yZCh5LCB5X3JlcCwgc3RhdCA9IGMoIm1lYW4iLCAic2QiKSkKYGBgCgpEaXN0cmlidXRpb25zIG9mIHByZWRpY3RpdmUgZXJyb3JzOgoKYGBge3IsIHByZWRpY3RpdmUtZXJyb3JzfQpwcGNfZXJyb3JfaGlzdCh5LCB5X3JlcFsxOjQsIF0sIGJpbndpZHRoID0gMSkgKyB4bGltKC0xNSwgMTUpCmBgYAoKIyBGaXQgUG9pc3NvbiAiaHVyZGxlIiBtb2RlbCAoYWxzbyB3aXRoIHRydW5jYXRpb24gZnJvbSBhYm92ZSkKClRoaXMgbW9kZWwgc2F5cyB0aGF0IHRoZXJlIGlzIHNvbWUgcHJvYmFiaWxpdHkgYHRoZXRhYCB0aGF0IGB5YAppcyB6ZXJvIGFuZCBwcm9iYWJpbGl0eSBgMSAtIHRoZXRhYCB0aGF0IGB5YCBpcyBwb3NpdGl2ZS4gCkNvbmRpdGlvbmFsIG9uIG9ic2VydmluZyBhIHBvc2l0aXZlIGB5YCwgd2UgdXNlIGEgdHJ1bmNhdGVkIApQb2lzc29uCmBgYAp5W25dIH4gUG9pc3NvbihsYW1iZGEpIFRbMSwgVV07CmBgYAp3aGVyZSBgVFsxLFVdYCBpbmRpY2F0ZXMgdHJ1bmNhdGlvbiB3aXRoIGxvd2VyIGJvdW5kIGAxYCBhbmQgdXBwZXIgYm91bmQgYFVgLCAKd2hpY2ggZm9yIHNpbXBsaWNpdHkgd2UnbGwgX2Fzc3VtZV8gaXMgYG1heCh5KWAuCgpgYGB7cn0KY29kZV9wb2lzc29uX2h1cmRsZSA8LSByb290KCJkZW1vc19yc3Rhbi9wcGMiLCAicG9pc3Nvbi1odXJkbGUuc3RhbiIpCndyaXRlTGluZXMocmVhZExpbmVzKGNvZGVfcG9pc3Nvbl9odXJkbGUpKQpgYGAKCgpgYGB7ciBmaXQtMn0KZml0MiA8LSBzdGFuKGZpbGUgPSBjb2RlX3BvaXNzb25faHVyZGxlLCBkYXRhID0gYygieSIsICJOIiksIHJlZnJlc2ggPSAwKQpgYGAKCmBgYHtyLCBwcmludC1maXQyfQpwcmludChmaXQyLCBwYXJzID0gYygibGFtYmRhIiwgInRoZXRhIikpCmBgYAoKIyMgQ29tcGFyZSBwb3N0ZXJpb3IgZGlzdHJpYnV0aW9ucyBvZiBsYW1iZGEgZnJvbSB0aGUgdHdvIG1vZGVscwpgYGB7ciwgY29tcGFyZS1sYW1iZGFzfQpsYW1iZGFfZHJhd3MyIDwtIGFzLm1hdHJpeChmaXQyLCBwYXJzID0gImxhbWJkYSIpCmxhbWJkYXMgPC0gY2JpbmQobGFtYmRhX2ZpdDEgPSBsYW1iZGFfZHJhd3NbLCAxXSwKICAgICAgICAg
ICAgICAgICBsYW1iZGFfZml0MiA9IGxhbWJkYV9kcmF3czJbLCAxXSkKbWNtY19hcmVhcyhsYW1iZGFzLCBwcm9iID0gMC44KSAjIGNvbG9yIDgwJSBpbnRlcnZhbApgYGAKCiMgUG9zdGVyaW9yIHByZWRpY3RpdmUgY2hlY2tzIGFnYWluCgpTYW1lIHBsb3RzIGFzIGJlZm9yZSAoYW5kIGEgZmV3IG90aGVycyksIGJ1dCB0aGlzIHRpbWUgdXNpbmcgYHlfcmVwYCBmcm9tIGBmaXQyYC4KRXZlcnl0aGluZyBsb29rcyBtdWNoIG1vcmUgcmVhc29uYWJsZToKCmBgYHtyIHBwYy1oaXN0LTIsIG1lc3NhZ2U9RkFMU0V9CnlfcmVwMiA8LSBhcy5tYXRyaXgoZml0MiwgcGFycyA9ICJ5X3JlcCIpCnBwY19oaXN0KHksIHlfcmVwMlsxOjgsIF0sIGJpbndpZHRoID0gMSkKYGBgCgpgYGB7ciBwcGMtZGVucy1vdmVybGF5LTJ9CnBwY19kZW5zX292ZXJsYXkoeSwgeV9yZXAyWzE6NTAsIF0pCmBgYAoKYGBge3IgcHBjLXJvb3RvZ3JhbS0yfQpwcGNfcm9vdG9ncmFtKHksIHlfcmVwMikKYGBgCgpgYGB7ciwgcHJvcC16ZXJvLTIsIG1lc3NhZ2U9RkFMU0V9CnBwY19zdGF0KHksIHlfcmVwMiwgc3RhdCA9ICJwcm9wX3plcm8iKQpgYGAKCgpgYGB7ciwgbW9yZS1jaGVja3MsIG1lc3NhZ2U9RkFMU0V9CnBwY19zdGF0XzJkKHksIHlfcmVwMiwgc3RhdCA9IGMoIm1lYW4iLCAic2QiKSkKcHBjX2Vycm9yX2hpc3QoeSwgeV9yZXAyW3NhbXBsZShucm93KHlfcmVwMiksIDQpLCBdLCBiaW53aWR0aCA9IDEpCmBgYAoKIyBDb21wYXJpc29uIHdpdGggbGVhdmUtb25lLW91dCBjcm9zcy12YWxpZGF0aW9uCgpDb21wYXJlIHByZWRpY3RpdmUgcGVyZm9ybWFuY2Ugd2l0aCBMT08uCgpgYGB7cn0KKGxvbzEgPC0gbG9vKGZpdCkpCihsb28yIDwtIGxvbyhmaXQyKSkKbG9vX2NvbXBhcmUobG9vMSwgbG9vMikKYGBgCgpNb2RlbCAyIGlzIGEgY2xlYXIgd2lubmVyIGluIHRoZSBwcmVkaWN0aXZlIHBlcmZvcm1hbmNlLgoK