library(bvhar)
etf <- etf_vix[1:55, 1:3] # first 55 observations of three series from etf_vix
# Split into training and test sets ---------------------------------
h <- 5
etf_eval <- divide_ts(etf, h)
etf_train <- etf_eval$train # training set
etf_test <- etf_eval$test # test set of size h
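
Since the test set holds out the last h = 5 rows of the 55-row input, you can sanity-check the split; this is a quick sketch, and the commented dimensions follow from that assumption:

dim(etf_train) # expected: 50 3
dim(etf_test) # expected: 5 3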

Bayesian VAR and VHAR

var_bayes() and vhar_bayes() fit BVAR and BVHAR models, respectively, with various priors.

  • y: Multivariate time series data. It should be a data frame or matrix in which every column is numeric. Each column corresponds to one variable, i.e. the data should be in wide format.
  • p or har: VAR lag, or order of VHAR
  • num_chains: Number of MCMC chains
    • If OpenMP is enabled, the chains run in a parallel loop.
  • num_iter: Total number of iterations
  • num_burn: Number of burn-in iterations
  • thinning: Thinning interval
  • bayes_spec: Prior specification, e.g. the output of set_ssvs(), set_horseshoe(), set_bvhar(), set_ng(), or set_dl()
  • cov_spec: Covariance prior specification. Use set_ldlt() for a homoskedastic model.
  • include_mean = TRUE: By default, the constant term is included in the model.
  • minnesota = c("no", "short", "longrun"): Minnesota-type shrinkage.
  • verbose = FALSE: Whether to display a progress bar
  • num_thread: Number of threads for OpenMP
    • Used in the parallel multi-chain loop (see the sketch after this list).
    • This option is valid only when OpenMP is available on the user's machine.
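
As a quick illustration of the multi-chain arguments, the sketch below fits a BVHAR-SSVS model with two chains sampled in parallel. It is only a sketch: the har order c(5, 22) and the iteration counts are illustrative choices, not values used elsewhere in this vignette.

fit_parallel <- vhar_bayes(
  etf_train,
  har = c(5, 22), # daily, weekly, monthly HAR orders
  num_chains = 2, # two MCMC chains
  num_iter = 1000, # total iterations per chain
  num_burn = 500, # burn-in draws discarded from each chain
  bayes_spec = set_ssvs(),
  cov_spec = set_ldlt(),
  num_thread = 2 # one OpenMP thread per chain, if OpenMP is available
)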

Stochastic Search Variable Selection (SSVS) Prior

(fit_ssvs <- vhar_bayes(etf_train, num_chains = 1, num_iter = 20, bayes_spec = set_ssvs(), cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun"))
#> Call:
#> vhar_bayes(y = etf_train, num_chains = 1, num_iter = 20, bayes_spec = set_ssvs(), 
#>     cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun")
#> 
#> BVHAR with SSVS prior
#> Fitted by Gibbs sampling
#> Total number of iteration: 20
#> Number of burn-in: 10
#> ====================================================
#> 
#> Parameter Record:
#> # A draws_df: 10 iterations, 1 chains, and 90 variables
#>        phi[1]  phi[2]   phi[3]    phi[4]    phi[5]   phi[6]  phi[7]  phi[8]
#> 1    0.302333  0.4543  -0.0375  -0.49544  -0.73536  -0.3439   0.162   0.567
#> 2    0.104370  0.1934  -0.2732  -0.00793  -0.19941   0.0301   0.213   0.492
#> 3    0.373717  0.8019  -0.3833  -0.64650  -1.14889   0.0196  -0.296   1.299
#> 4    0.096888  0.0861  -0.2299  -0.96204  -0.36559  -0.1900   0.754   0.902
#> 5   -0.000546  0.3905  -0.2333   0.03746  -0.38598   0.0149   0.450   0.855
#> 6    0.013841  0.2744  -0.2938   0.10004  -0.17177  -0.0979   0.702   0.922
#> 7   -0.243175  0.0134   0.0297  -0.46766  -0.38944  -0.2737   0.715   0.988
#> 8    0.119138  0.2333  -0.1711  -1.07667  -0.27909  -0.2497   1.797   0.858
#> 9   -0.123037  0.0541  -0.3807  -0.52814   0.03044   0.1314   0.684   1.394
#> 10  -0.007333  0.0410   0.0137  -0.97990   0.00219  -0.1690   1.298   1.312
#> # ... with 82 more variables
#> # ... hidden reserved variables {'.chain', '.iteration', '.draw'}

autoplot() for the fit (a bvharsp object) provides a heatmap of the coefficients. The type argument controls the plot, and the default type = "coef" draws the heatmap.

autoplot(fit_ssvs)
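
autoplot() returns a plot object that you can modify further; assuming it is a standard ggplot object, the usual ggplot2 layers apply:

autoplot(fit_ssvs) + ggplot2::labs(title = "BVHAR-SSVS coefficients")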

Horseshoe Prior

bayes_spec is the prior specification created by set_horseshoe(). The other arguments are the same.

(fit_hs <- vhar_bayes(etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_horseshoe(), cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun"))
#> Call:
#> vhar_bayes(y = etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_horseshoe(), 
#>     cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun")
#> 
#> BVHAR with Horseshoe prior
#> Fitted by Gibbs sampling
#> Number of chains: 2
#> Total number of iteration: 20
#> Number of burn-in: 10
#> ====================================================
#> 
#> Parameter Record:
#> # A draws_df: 10 iterations, 2 chains, and 124 variables
#>      phi[1]   phi[2]   phi[3]    phi[4]   phi[5]  phi[6]     phi[7]   phi[8]
#> 1    0.1855  -0.0384   0.0917   0.00753   0.1832   1.044   0.174094  -0.1117
#> 2    0.1331  -0.0762   0.0435  -0.00436   0.2640   0.817   0.228978  -0.0391
#> 3    0.1611  -0.1832   0.0624  -0.01279  -0.1236   0.688   0.195932  -0.0598
#> 4    0.2314  -0.0794   0.0186  -0.08791  -0.0304   1.010  -0.123494  -0.0472
#> 5    0.0946  -0.0219   0.0344   0.12698   0.0809   1.038  -0.002477   0.0370
#> 6    0.1352  -0.0795   0.0804  -0.07565   0.2018   0.910   0.001557  -0.0110
#> 7    0.0155  -0.1789  -0.0395   0.16233   0.5131   0.930  -0.000694  -0.0216
#> 8   -0.0507  -0.1769   0.0890  -0.03753   0.9218   0.768   0.003441  -0.1093
#> 9    0.2339   0.0354   0.0272   0.08280   0.5135   0.982  -0.009061   0.0401
#> 10   0.2393   0.0170   0.0167  -0.05259   0.6156   0.754  -0.010164  -0.0214
#> # ... with 10 more draws, and 116 more variables
#> # ... hidden reserved variables {'.chain', '.iteration', '.draw'}

autoplot(fit_hs)

Minnesota Prior
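
set_bvhar(lambda = set_lambda()) specifies a hierarchical Minnesota prior, in which the shrinkage parameter lambda is itself given a prior (hence MN_Hierarchical in the output below).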

(fit_mn <- vhar_bayes(etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_bvhar(lambda = set_lambda()), cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun"))
#> Call:
#> vhar_bayes(y = etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_bvhar(lambda = set_lambda()), 
#>     cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun")
#> 
#> BVHAR with MN_Hierarchical prior
#> Fitted by Gibbs sampling
#> Number of chains: 2
#> Total number of iteration: 20
#> Number of burn-in: 10
#> ====================================================
#> 
#> Parameter Record:
#> # A draws_df: 10 iterations, 2 chains, and 63 variables
#>     phi[1]  phi[2]    phi[3]   phi[4]    phi[5]  phi[6]   phi[7]    phi[8]
#> 1   0.1686  -0.143   0.13034  -0.0657  -0.11305   0.661   0.5624   0.10648
#> 2   0.1005  -0.269  -0.14573   0.0930   0.16561   0.735   0.3021  -0.00244
#> 3   0.1220  -0.170   0.41477  -0.0598   0.04038   0.895   0.2439   0.12261
#> 4   0.4330  -0.288  -0.06231   0.3149   0.09932   0.676   0.3609  -0.12742
#> 5   0.2204  -0.340   0.24797   0.1936  -0.00245   0.770   0.3964   0.51817
#> 6   0.0512  -0.149   0.06390   0.1700   0.09526   0.941  -0.0637   0.19208
#> 7   0.2391  -0.141  -0.08897   0.1985   0.06612   0.805   0.6443   0.02353
#> 8   0.0730  -0.134   0.28012  -0.0270   0.02806   1.214   0.0161   0.34295
#> 9   0.1150  -0.153  -0.00198  -0.0903   0.21183   0.788   0.2147   0.05750
#> 10  0.3004  -0.121   0.09600   0.1277   0.08608   0.994  -0.1748   0.07166
#> # ... with 10 more draws, and 55 more variables
#> # ... hidden reserved variables {'.chain', '.iteration', '.draw'}

Normal-Gamma Prior
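
set_ng() specifies the Normal-Gamma shrinkage prior. As the output below shows, this model is fitted by Metropolis-within-Gibbs rather than plain Gibbs sampling.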

(fit_ng <- vhar_bayes(etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_ng(), cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun"))
#> Call:
#> vhar_bayes(y = etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_ng(), 
#>     cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun")
#> 
#> BVHAR with NG prior
#> Fitted by Metropolis-within-Gibbs
#> Number of chains: 2
#> Total number of iteration: 20
#> Number of burn-in: 10
#> ====================================================
#> 
#> Parameter Record:
#> # A draws_df: 10 iterations, 2 chains, and 97 variables
#>       phi[1]     phi[2]   phi[3]    phi[4]   phi[5]  phi[6]     phi[7]
#> 1   -0.13656  -0.147652  -0.0798  -0.01935  -0.1942   0.840  -0.042350
#> 2   -0.08568  -0.029476  -0.1201  -0.56230   0.5928   0.820   0.009068
#> 3    0.20467  -0.000905  -0.0655  -0.02796   0.3851   0.473  -0.000271
#> 4    0.04156  -0.000212   0.3434   0.55208  -0.4819   0.629   0.000452
#> 5    0.00406   0.011668   0.1263   0.81001   0.0257   0.435  -0.000350
#> 6    0.00838   0.002449  -0.1005   0.09922   0.8598   0.174   0.000472
#> 7    0.00269   0.032985   0.0643   0.29943   0.5240   0.260   0.000834
#> 8   -0.03945  -0.046687  -0.0305   0.15665   0.4438   0.374  -0.002324
#> 9    0.01818   0.000556   0.0092   0.13773   0.9453   0.376   0.026537
#> 10  -0.08646  -0.000340   0.0101   0.00398   1.0015   0.715   0.054736
#>       phi[8]
#> 1   -0.00981
#> 2   -0.00975
#> 3   -0.02623
#> 4   -0.06663
#> 5   -0.19023
#> 6   -0.21609
#> 7   -0.00728
#> 8    0.00117
#> 9    0.00176
#> 10   0.00143
#> # ... with 10 more draws, and 89 more variables
#> # ... hidden reserved variables {'.chain', '.iteration', '.draw'}

Dirichlet-Laplace Prior
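
set_dl() specifies the Dirichlet-Laplace shrinkage prior; the other arguments are unchanged.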

(fit_dl <- vhar_bayes(etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_dl(), cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun"))
#> Call:
#> vhar_bayes(y = etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_dl(), 
#>     cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun")
#> 
#> BVHAR with DL prior
#> Fitted by Gibbs sampling
#> Number of chains: 2
#> Total number of iteration: 20
#> Number of burn-in: 10
#> ====================================================
#> 
#> Parameter Record:
#> # A draws_df: 10 iterations, 2 chains, and 91 variables
#>        phi[1]     phi[2]   phi[3]     phi[4]    phi[5]  phi[6]     phi[7]
#> 1    0.060750  -3.65e-06   0.0482   0.192537   0.04231   0.719  -1.52e-06
#> 2   -0.008709  -6.92e-06   0.1405  -0.067433  -0.13138   0.595   3.91e-06
#> 3    0.000974  -6.48e-04  -0.1605   0.053017   0.02757   0.516   2.88e-06
#> 4   -0.002808  -3.96e-03   0.3150   0.206737   0.00485   0.540   7.79e-07
#> 5   -0.002665  -7.35e-02   0.1263   0.192209  -0.01511   0.605   7.96e-07
#> 6   -0.000491  -5.66e-02   0.0178  -0.008705  -0.02227   0.437   1.89e-07
#> 7   -0.000465  -8.49e-02   0.0655   0.000784  -0.00668   0.713  -7.22e-07
#> 8    0.000411  -7.63e-02   0.0364  -0.000125  -0.04994   0.747   2.52e-09
#> 9   -0.000232  -3.67e-02  -0.0246   0.000278   0.11802   0.960   2.01e-09
#> 10   0.000133  -4.26e-02  -0.0129   0.025109  -0.13398   0.775   2.14e-09
#>      phi[8]
#> 1   -0.1756
#> 2   -0.2059
#> 3   -0.3187
#> 4   -0.2294
#> 5   -0.1822
#> 6   -0.1406
#> 7    0.0058
#> 8   -0.1003
#> 9   -0.1328
#> 10  -0.0781
#> # ... with 10 more draws, and 83 more variables
#> # ... hidden reserved variables {'.chain', '.iteration', '.draw'}

Bayesian Visualization

autoplot() also provides Bayesian visualization: type = "trace" gives the MCMC trace plot.

autoplot(fit_hs, type = "trace", regex_pars = "tau")

type = "dens" draws MCMC density plot. If specifying additional argument facet_args = list(dir = "v") of bayesplot, you can see plot as the same format with coefficient matrix.

autoplot(fit_hs, type = "dens", regex_pars = "kappa", facet_args = list(dir = "v", nrow = nrow(fit_hs$coefficients)))