library(bvhar)

etf <- etf_vix[1:55, 1:3]
# Split into train and test sets ------
h <- 5
etf_eval <- divide_ts(etf, h)
etf_train <- etf_eval$train
etf_test <- etf_eval$test
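
divide_ts() reserves the last h observations as the test set. A quick dimension check (a minimal sketch; the expected sizes in the comments follow from the 55-by-3 input and h = 5) confirms the split:

dim(etf_train) # expected: 50 3
dim(etf_test)  # expected: 5 3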

Bayesian VAR and VHAR

var_bayes() and vhar_bayes() fit BVAR and BVHAR models, respectively, each with various priors. Their main arguments are:

  • y: Multivariate time series data. It should be a data frame or matrix in which every column is numeric. Each column indicates a variable, i.e. it should be in wide format.
  • p or har: VAR lag, or order of VHAR
  • num_chains: Number of chains
    • If OpenMP is enabled, the chains are run in parallel (see the sketch after this list).
  • num_iter: Total number of iterations
  • num_burn: Number of burn-in iterations
  • thinning: Thinning interval
  • bayes_spec: Prior specification, e.g. the output of set_ssvs(); the other priors shown below are set with set_horseshoe(), set_bvhar(), set_ng(), and set_dl()
  • cov_spec: Covariance prior specification. Use set_ldlt() for the homoskedastic model.
  • include_mean = TRUE: Whether to include the constant term in the model. It is included by default.
  • minnesota = c("no", "short", "longrun"): Minnesota-type shrinkage.
  • verbose = FALSE: Whether to display a progress bar.
  • num_thread: Number of threads for OpenMP
    • Used in the parallel multi-chain loop
    • This option is valid only when OpenMP is available on the user's machine.
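
For example, a two-chain run that exercises the parallel loop could look like the following minimal sketch (num_thread only takes effect when OpenMP is available on your machine):

fit_par <- vhar_bayes(
  etf_train,
  num_chains = 2,          # two MCMC chains
  num_iter = 20,           # total iterations per chain
  num_burn = 10,           # burn-in draws to discard
  bayes_spec = set_ssvs(),
  cov_spec = set_ldlt(),
  num_thread = 2           # one OpenMP thread per chain
)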

Stochastic Search Variable Selection (SSVS) Prior

(fit_ssvs <- vhar_bayes(etf_train, num_chains = 1, num_iter = 20, bayes_spec = set_ssvs(), cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun"))
#> Call:
#> vhar_bayes(y = etf_train, num_chains = 1, num_iter = 20, bayes_spec = set_ssvs(), 
#>     cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun")
#> 
#> BVHAR with SSVS prior
#> Fitted by Gibbs sampling
#> Total number of iteration: 20
#> Number of burn-in: 10
#> ====================================================
#> 
#> Parameter Record:
#> # A draws_df: 10 iterations, 1 chains, and 90 variables
#>      phi[1]   phi[2]    phi[3]   phi[4]     phi[5]     phi[6]    phi[7]
#> 1    0.3847   0.0438  -0.27739   0.2219  -0.000547   0.086491  -0.00246
#> 2    0.0714  -0.2113  -0.00678   0.2742  -0.020918  -0.050206  -0.07369
#> 3   -0.2201   0.4460  -0.36137   0.0270  -0.521807  -0.000639   0.77706
#> 4    0.0897  -0.6214   0.04576  -0.7916   0.254891  -0.121410   0.89058
#> 5   -0.2125  -0.2902   0.11610  -0.9549   0.049153  -0.341027   1.14674
#> 6   -0.0499  -0.1676  -0.16256   0.0643   0.129225  -0.278598   0.95348
#> 7    0.4560   0.2951  -0.18927  -0.1657  -0.371752  -0.185117   0.18339
#> 8   -0.1574  -0.0717  -0.15395   0.4188   0.312408   0.033258   0.19845
#> 9    0.2302   0.0496  -0.17701  -0.3325  -0.280847   0.079401  -0.03656
#> 10  -0.0181   0.1486  -0.33310   0.4592   0.246448   0.177874   0.30441
#>      phi[8]
#> 1    0.0012
#> 2   -0.2846
#> 3    0.5663
#> 4    0.4800
#> 5    0.4987
#> 6    0.6375
#> 7    0.4945
#> 8    0.8291
#> 9    0.4264
#> 10   0.5300
#> # ... with 82 more variables
#> # ... hidden reserved variables {'.chain', '.iteration', '.draw'}

autoplot() for the fit (a bvharsp object) provides a heatmap of the coefficients. It has a type argument, and the default type = "coef" draws the heatmap.

autoplot(fit_ssvs)
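
Since autoplot() methods conventionally return a ggplot object, the heatmap can be customized with the usual ggplot2 layers. This is an assumption based on the general autoplot() convention rather than bvhar-specific documentation:

library(ggplot2)
# Add a title to the coefficient heatmap (assumes a ggplot return value)
autoplot(fit_ssvs) + labs(title = "BVHAR-SSVS coefficient heatmap")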

Horseshoe Prior

bayes_spec is the initial specification created by set_horseshoe(). The other arguments are the same as before.

(fit_hs <- vhar_bayes(etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_horseshoe(), cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun"))
#> Call:
#> vhar_bayes(y = etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_horseshoe(), 
#>     cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun")
#> 
#> BVHAR with Horseshoe prior
#> Fitted by Gibbs sampling
#> Number of chains: 2
#> Total number of iteration: 20
#> Number of burn-in: 10
#> ====================================================
#> 
#> Parameter Record:
#> # A draws_df: 10 iterations, 2 chains, and 124 variables
#>      phi[1]    phi[2]    phi[3]   phi[4]  phi[5]  phi[6]    phi[7]     phi[8]
#> 1    0.0784  -0.27321  -0.00843  -0.0246   0.560   0.467   0.73729   0.227085
#> 2   -0.2107  -0.36372  -0.00353   0.0378   0.038   0.230   0.35817  -0.217870
#> 3    0.1568  -0.08613   0.00207  -0.8498   1.113   0.711  -0.03426   0.008767
#> 4    0.1730  -0.23023  -0.00349   0.4458   0.358   0.885  -0.06063  -0.000532
#> 5    0.0952  -0.18874  -0.00243  -0.2126   0.926   0.691   0.01630   0.004454
#> 6    0.3015  -0.13450   0.01453   0.1699   0.357   1.208  -0.00679  -0.001626
#> 7    0.1381   0.00144   0.01766  -0.3898   0.514   0.880   0.05919  -0.014962
#> 8    0.2213  -0.26146   0.02653  -0.1584   0.680   0.366   0.00995  -0.111351
#> 9    0.1937  -0.21985   0.08145  -0.1757   0.566   0.588   0.63415  -0.056310
#> 10   0.2042  -0.19065   0.01578   0.2702   0.569   0.542   0.27934  -0.182732
#> # ... with 10 more draws, and 116 more variables
#> # ... hidden reserved variables {'.chain', '.iteration', '.draw'}

autoplot(fit_hs)

Minnesota Prior

(fit_mn <- vhar_bayes(etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_bvhar(lambda = set_lambda()), cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun"))
#> Call:
#> vhar_bayes(y = etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_bvhar(lambda = set_lambda()), 
#>     cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun")
#> 
#> BVHAR with MN_Hierarchical prior
#> Fitted by Gibbs sampling
#> Number of chains: 2
#> Total number of iteration: 20
#> Number of burn-in: 10
#> ====================================================
#> 
#> Parameter Record:
#> # A draws_df: 10 iterations, 2 chains, and 63 variables
#>      phi[1]    phi[2]    phi[3]   phi[4]   phi[5]  phi[6]  phi[7]   phi[8]
#> 1   -0.0233  -0.16194   0.11825   0.2772   0.1300   1.098  0.5572   0.1173
#> 2    0.0486  -0.19665  -0.00183   0.0411   0.1851   1.114  0.6318   0.2012
#> 3    0.2687  -0.20244   0.24069   0.2050   0.1636   0.657  0.0455   0.0784
#> 4   -0.2079  -0.29335   0.14632   0.3239   0.3756   1.017  1.0081   0.0591
#> 5    0.3382  -0.27744  -0.22038  -0.4747   0.4020   0.768  0.3535   0.0929
#> 6    0.3135  -0.26244   0.07382   0.1641  -0.0796   1.077  0.4244   0.1419
#> 7    0.2693  -0.00653   0.35265   0.2064   0.0599   0.420  0.1562  -0.1135
#> 8    0.1704  -0.41107  -0.42766  -0.1914   0.5046   0.907  0.2155  -0.0079
#> 9    0.1911  -0.25431  -0.02334   0.2605   0.0925   0.797  0.5784   0.1229
#> 10   0.5261  -0.25419   0.09872   0.0215   0.1997   0.718  0.3038   0.0260
#> # ... with 10 more draws, and 55 more variables
#> # ... hidden reserved variables {'.chain', '.iteration', '.draw'}

Normal-Gamma Prior

(fit_ng <- vhar_bayes(etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_ng(), cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun"))
#> Call:
#> vhar_bayes(y = etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_ng(), 
#>     cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun")
#> 
#> BVHAR with NG prior
#> Fitted by Metropolis-within-Gibbs
#> Number of chains: 2
#> Total number of iteration: 20
#> Number of burn-in: 10
#> ====================================================
#> 
#> Parameter Record:
#> # A draws_df: 10 iterations, 2 chains, and 97 variables
#>       phi[1]    phi[2]   phi[3]   phi[4]     phi[5]  phi[6]    phi[7]    phi[8]
#> 1    0.42392  -0.21878   0.2556   0.6214   0.003023   0.752   0.06803   0.14119
#> 2   -0.00465  -0.03564   0.1146   0.1574   0.290536   0.473   1.37919   0.05171
#> 3   -0.05017   0.10621   0.7440   1.0126  -0.002730   0.979   0.00334   0.06805
#> 4    0.08434   0.11142   0.2012   0.0301   0.170836   0.744   0.27346   0.18611
#> 5    0.16315  -0.06637  -0.1811  -0.1891   0.297721   1.018   0.81096  -0.00551
#> 6    0.08758  -0.00917   0.1900   1.1043   0.001282   0.992   0.47470   0.09630
#> 7    0.06676  -0.01290  -0.4421  -0.2121   1.871956   1.322   0.73512   0.19992
#> 8    0.53834  -0.01505   0.0893  -0.0316  -0.000366   1.074   0.00198   0.05561
#> 9    0.59743  -0.00888   0.0327   0.4047   0.010257   0.724  -0.01201  -0.19310
#> 10   0.16419  -0.13100   0.5944   1.0817   0.049834   0.912   0.12788  -0.01071
#> # ... with 10 more draws, and 89 more variables
#> # ... hidden reserved variables {'.chain', '.iteration', '.draw'}

Dirichlet-Laplace Prior

(fit_dl <- vhar_bayes(etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_dl(), cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun"))
#> Call:
#> vhar_bayes(y = etf_train, num_chains = 2, num_iter = 20, bayes_spec = set_dl(), 
#>     cov_spec = set_ldlt(), include_mean = FALSE, minnesota = "longrun")
#> 
#> BVHAR with DL prior
#> Fitted by Gibbs sampling
#> Number of chains: 2
#> Total number of iteration: 20
#> Number of burn-in: 10
#> ====================================================
#> 
#> Parameter Record:
#> # A draws_df: 10 iterations, 2 chains, and 91 variables
#>      phi[1]   phi[2]   phi[3]  phi[4]   phi[5]  phi[6]  phi[7]   phi[8]
#> 1    0.0308  -0.2196   0.1161   0.737   0.1653   0.905   0.666   0.0473
#> 2    0.3001  -0.3084  -0.3184   0.543   0.2048   0.924   0.428   0.1967
#> 3   -0.0170  -0.4520  -0.2867   0.832  -0.2763   0.680   1.211  -0.1741
#> 4    0.2589  -0.0543   0.2328   0.407  -0.2750   0.750   0.334  -0.0746
#> 5    0.1114  -0.3284  -0.0279   0.450  -0.4602   0.873   0.207  -0.0990
#> 6    0.2471  -0.2382  -0.6072   0.236   0.4191   0.109   0.260  -0.3498
#> 7    0.1552  -0.3935  -0.3105   0.141   0.0223   0.591  -0.281  -0.3767
#> 8   -0.0332  -0.0552  -0.1325  -0.361  -0.2273   0.531   0.633   0.0285
#> 9   -0.1945   0.0550   0.1631   0.908  -0.1706   0.805   0.395  -0.0490
#> 10  -0.1348  -0.3052   0.4822  -0.067  -0.3310   0.258   0.966   0.0293
#> # ... with 10 more draws, and 83 more variables
#> # ... hidden reserved variables {'.chain', '.iteration', '.draw'}

Bayesian Visualization

autoplot() also provides Bayesian visualization. type = "trace" gives the MCMC trace plot.

autoplot(fit_hs, type = "trace", regex_pars = "tau")

type = "dens" draws MCMC density plot. If specifying additional argument facet_args = list(dir = "v") of bayesplot, you can see plot as the same format with coefficient matrix.

autoplot(fit_hs, type = "dens", regex_pars = "kappa", facet_args = list(dir = "v", nrow = nrow(fit_hs$coefficients)))