library(bvhar)

etf <- etf_vix[1:55, 1:3] # first 55 observations of three series
# Split-------------------------------
h <- 5
etf_eval <- divide_ts(etf, h) # hold out the last h observations
etf_train <- etf_eval$train # training set
etf_test <- etf_eval$test # test set

Bayesian VAR and VHAR

var_bayes() and vhar_bayes() fit BVAR and BVHAR models, respectively, each with various priors. Their common arguments are listed below; a call sketch follows the list.

  • y: Multivariate time series data. It should be a data frame or matrix in which every column is numeric. Each column indicates a variable, i.e. it should be in wide format.
  • p or har: VAR lag, or order of VHAR
  • num_chains: Number of chains
    • If OpenMP is enabled, the chains run in a parallel loop.
  • num_iter: Total number of iterations
  • num_burn: Number of burn-in
  • thinning: Thinning
  • bayes_spec: Prior specification, e.g. the output of set_ssvs(), set_horseshoe(), set_bvhar(), set_ng(), or set_dl()
  • cov_spec: Covariance prior specification. Use set_ldlt() for a homoskedastic model.
  • include_mean = TRUE: By default, the constant term is included in the model.
  • minnesota = c("no", "short", "longrun"): Minnesota-type shrinkage.
  • verbose = FALSE: Whether to display a progress bar
  • num_thread: Number of threads for OpenMP
    • Used in the parallel multi-chain loop
    • This option is valid only when OpenMP is available on the user’s machine.
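
Putting these together, a minimal call sketch (the values here are illustrative, not recommendations; any of the set_*() specifications can be swapped in for bayes_spec):

fit <- vhar_bayes(
  etf_train,
  num_chains = 2,               # run two MCMC chains
  num_iter = 1000,              # total iterations per chain
  num_burn = 500,               # discarded as burn-in
  thinning = 1,
  bayes_spec = set_horseshoe(), # prior on the coefficients
  cov_spec = set_ldlt(),        # homoskedastic covariance
  include_mean = FALSE,
  minnesota = "longrun",
  num_thread = 2                # chains run in parallel if OpenMP is available
)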

Stochastic Search Variable Selection (SSVS) Prior

(fit_ssvs <- vhar_bayes(etf_train, num_chains = 1, num_iter = 20, bayes_spec = set_ssvs(), cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun"))
#> Call:
#> vhar_bayes(y = etf_train, num_chains = 1, num_iter = 20, bayes_spec = set_ssvs(), 
#>     cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun")
#> 
#> BVHAR with SSVS prior
#> Fitted by Gibbs sampling
#> Total number of iteration: 20
#> Number of burn-in: 10
#> ====================================================
#> 
#> Parameter Record:
#> # A draws_df: 10 iterations, 1 chains, and 90 variables
#>        phi[1]  phi[2]   phi[3]    phi[4]    phi[5]   phi[6]  phi[7]  phi[8]
#> 1    0.302337  0.4543  -0.0375  -0.49548  -0.73538  -0.3439   0.162   0.567
#> 2    0.104371  0.1934  -0.2732  -0.00798  -0.19941   0.0301   0.213   0.492
#> 3    0.373726  0.8019  -0.3833  -0.64653  -1.14892   0.0196  -0.296   1.299
#> 4    0.096895  0.0861  -0.2299  -0.96209  -0.36561  -0.1900   0.754   0.902
#> 5   -0.000544  0.3906  -0.2333   0.03740  -0.38600   0.0149   0.450   0.855
#> 6    0.013843  0.2744  -0.2938   0.10003  -0.17177  -0.0979   0.702   0.922
#> 7   -0.243176  0.0134   0.0297  -0.46767  -0.38944  -0.2737   0.715   0.988
#> 8    0.119136  0.2333  -0.1710  -1.07673  -0.27910  -0.2497   1.797   0.858
#> 9   -0.123032  0.0541  -0.3807  -0.52832   0.03043   0.1313   0.684   1.394
#> 10  -0.007328  0.0410   0.0137  -0.98017   0.00218  -0.1690   1.298   1.312
#> # ... with 82 more variables
#> # ... hidden reserved variables {'.chain', '.iteration', '.draw'}

autoplot() for the fit (a bvharsp object) draws a heatmap of the coefficients. The type argument controls the plot type; the default type = "coef" draws the heatmap.

autoplot(fit_ssvs)

Horseshoe Prior

For the Horseshoe prior, bayes_spec is the specification made by set_horseshoe(). The other arguments are the same.

(fit_hs <- vhar_bayes(etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_horseshoe(), cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun"))
#> Call:
#> vhar_bayes(y = etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_horseshoe(), 
#>     cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun")
#> 
#> BVHAR with Horseshoe prior
#> Fitted by Gibbs sampling
#> Number of chains: 2
#> Total number of iteration: 20
#> Number of burn-in: 10
#> ====================================================
#> 
#> Parameter Record:
#> # A draws_df: 10 iterations, 2 chains, and 124 variables
#>      phi[1]   phi[2]   phi[3]    phi[4]   phi[5]  phi[6]     phi[7]   phi[8]
#> 1    0.1855  -0.0384   0.0917   0.00753   0.1832   1.044   0.174144  -0.1117
#> 2    0.1331  -0.0762   0.0435  -0.00436   0.2640   0.817   0.229029  -0.0391
#> 3    0.1611  -0.1832   0.0624  -0.01279  -0.1236   0.688   0.195939  -0.0598
#> 4    0.2314  -0.0794   0.0186  -0.08791  -0.0304   1.010  -0.123500  -0.0472
#> 5    0.0946  -0.0219   0.0344   0.12698   0.0809   1.038  -0.002482   0.0370
#> 6    0.1352  -0.0795   0.0804  -0.07565   0.2018   0.910   0.001557  -0.0110
#> 7    0.0155  -0.1789  -0.0395   0.16233   0.5132   0.930  -0.000694  -0.0216
#> 8   -0.0507  -0.1769   0.0890  -0.03753   0.9218   0.768   0.003441  -0.1093
#> 9    0.2339   0.0354   0.0272   0.08280   0.5135   0.982  -0.009063   0.0401
#> 10   0.2393   0.0170   0.0167  -0.05259   0.6156   0.754  -0.010166  -0.0214
#> # ... with 10 more draws, and 116 more variables
#> # ... hidden reserved variables {'.chain', '.iteration', '.draw'}

autoplot(fit_hs)

Minnesota Prior
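
bayes_spec = set_bvhar(lambda = set_lambda()) specifies the hierarchical Minnesota prior (reported as MN_Hierarchical below), where set_lambda() makes the shrinkage parameter lambda hierarchical.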

(fit_mn <- vhar_bayes(etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_bvhar(lambda = set_lambda()), cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun"))
#> Call:
#> vhar_bayes(y = etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_bvhar(lambda = set_lambda()), 
#>     cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun")
#> 
#> BVHAR with MN_Hierarchical prior
#> Fitted by Gibbs sampling
#> Number of chains: 2
#> Total number of iteration: 20
#> Number of burn-in: 10
#> ====================================================
#> 
#> Parameter Record:
#> # A draws_df: 10 iterations, 2 chains, and 63 variables
#>     phi[1]   phi[2]   phi[3]   phi[4]    phi[5]  phi[6]   phi[7]   phi[8]
#> 1   0.0574  -0.2289  -0.0225  -0.2779  -0.04506   1.056   0.1102  -0.0640
#> 2   0.0923  -0.1600  -0.1074  -0.0168  -0.06174   1.098   0.5046   0.0684
#> 3   0.3472  -0.0332  -0.2040   0.1687   0.68539   0.842   0.0862  -0.0295
#> 4   0.1847   0.0239   0.2598  -0.0595  -0.00376   0.641  -0.3836  -0.2750
#> 5   0.1223  -0.1913  -0.0138   0.2144   0.49709   0.946   0.7645   0.3838
#> 6   0.0722  -0.1736   0.0873   0.0605   0.35205   0.680   0.0216  -0.0560
#> 7   0.2092   0.2399   0.4140   0.2103   0.34400   0.678   0.4793  -0.1857
#> 8   0.3781  -0.2364   0.2203  -0.3704   0.09095   0.718   0.5853  -0.1933
#> 9   0.2177  -0.0415  -0.1745   0.2458   0.14458   0.704   0.2544  -0.0170
#> 10  0.1605  -0.0599   0.1923   0.2352   0.19033   0.986  -0.5050  -0.3299
#> # ... with 10 more draws, and 55 more variables
#> # ... hidden reserved variables {'.chain', '.iteration', '.draw'}

Normal-Gamma Prior
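
bayes_spec = set_ng() specifies the Normal-Gamma prior. Note that this model is fitted by Metropolis-within-Gibbs rather than plain Gibbs sampling.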

(fit_ng <- vhar_bayes(etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_ng(), cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun"))
#> Call:
#> vhar_bayes(y = etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_ng(), 
#>     cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun")
#> 
#> BVHAR with NG prior
#> Fitted by Metropolis-within-Gibbs
#> Number of chains: 2
#> Total number of iteration: 20
#> Number of burn-in: 10
#> ====================================================
#> 
#> Parameter Record:
#> # A draws_df: 10 iterations, 2 chains, and 97 variables
#>      phi[1]     phi[2]    phi[3]    phi[4]   phi[5]  phi[6]   phi[7]    phi[8]
#> 1    0.0239  -0.000371  -0.00774   0.30406  -0.4156   0.180   0.5711  -0.00505
#> 2    0.1351  -0.000892  -0.18515  -0.00503   0.8094   0.612   1.0913   0.05370
#> 3    0.1919  -0.000979   0.18403   0.06152   0.3535   0.479   1.4394   0.03016
#> 4   -0.0180  -0.000224  -0.12140   0.50746   0.6493   0.442   0.2520  -0.07450
#> 5    0.0167  -0.000580  -0.05948  -0.05656   1.0994   0.253   0.0555  -0.19975
#> 6    0.2365  -0.004033  -0.07897  -0.01271   0.6498   0.279   0.0364  -0.32809
#> 7    0.2488   0.004425  -0.04808  -0.41173   0.5168   0.659   0.7106  -0.05164
#> 8    0.1466   0.015549  -0.04945  -0.03920   0.0513   0.847  -0.0742  -0.22091
#> 9    0.0920   0.017098  -0.06370  -0.01558   0.2960   0.683  -0.1103  -0.11615
#> 10   0.0398  -0.090584   0.13516  -0.29201   0.3678   1.030   0.6488  -0.31626
#> # ... with 10 more draws, and 89 more variables
#> # ... hidden reserved variables {'.chain', '.iteration', '.draw'}

Dirichlet-Laplace Prior
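
bayes_spec = set_dl() specifies the Dirichlet-Laplace prior.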

(fit_dl <- vhar_bayes(etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_dl(), cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun"))
#> Call:
#> vhar_bayes(y = etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_dl(), 
#>     cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun")
#> 
#> BVHAR with DL prior
#> Fitted by Gibbs sampling
#> Number of chains: 2
#> Total number of iteration: 20
#> Number of burn-in: 10
#> ====================================================
#> 
#> Parameter Record:
#> # A draws_df: 10 iterations, 2 chains, and 91 variables
#>       phi[1]    phi[2]  phi[3]    phi[4]    phi[5]  phi[6]    phi[7]    phi[8]
#> 1    0.05582  -0.04964  0.0620   0.78639   0.00352   0.962   0.00321  -0.01761
#> 2    0.00879  -0.02344  0.0921  -0.50203   0.72942   0.939  -0.00431   0.02157
#> 3    0.11592  -0.00327  0.2726  -0.06332   0.19191   1.003  -0.02839  -0.09357
#> 4    0.44673   0.00220  0.0340  -0.02172   0.33265   0.839   0.37484   0.00943
#> 5    0.47248   0.00293  0.0342  -0.00848   0.43971   1.152   0.24066   0.03827
#> 6    0.17764  -0.00270  0.5839  -0.04313  -0.07198   0.841   0.27472  -0.09927
#> 7    0.17417  -0.01040  0.2732   0.00145  -0.02923   0.966  -0.07610  -0.10401
#> 8   -0.02180   0.04319  0.2226  -0.00164  -0.00496   0.835  -0.01362  -0.06213
#> 9    0.07888   0.03103  0.2318  -0.00266  -0.00281   1.026   0.00658  -0.23399
#> 10   0.01099  -0.18264  0.2256   0.03739   0.01859   1.204  -0.02366  -0.13103
#> # ... with 10 more draws, and 83 more variables
#> # ... hidden reserved variables {'.chain', '.iteration', '.draw'}

Bayesian Visualization

autoplot() also provides Bayesian visualization. type = "trace" gives an MCMC trace plot.

autoplot(fit_hs, type = "trace", regex_pars = "tau")

type = "dens" draws MCMC density plot. If specifying additional argument facet_args = list(dir = "v") of bayesplot, you can see plot as the same format with coefficient matrix.

autoplot(fit_hs, type = "dens", regex_pars = "kappa", facet_args = list(dir = "v", nrow = nrow(fit_hs$coefficients)))