# For detailed information about the implementation of Bayesian models, see
# Lee & Wagenmakers (2013), https://bayesmodels.com

library(rstan)      # interface to Stan
library(logspline)  # nonparametric density estimate of the posterior

# Stan model: linear regression with a directed (positive) effect-size parameter delta
linear_regression_BF <- '
data {
  int<lower=1> n_data;          // number of data items
  int<lower=1> n_cat;           // number of predictors
  matrix[n_data, n_cat] X;      // predictor matrix
  vector[n_data] y;             // outcome variable
}
parameters {
  real alpha;                   // coefficient for X[, 1]
  real<lower=0> delta;          // standardised effect size; directed hypothesis, delta is positive
  real<lower=0> sigma;          // residual standard deviation
}
transformed parameters {
  real beta;
  beta = delta * sigma;         // regression weight on the raw scale
}
model {
  // Priors
  delta ~ cauchy(0, 1) T[0, ];  // half-Cauchy(0, 1) prior on the effect size
  sigma ~ cauchy(0, 1);         // half-Cauchy(0, 1) prior on sigma (positivity comes from the declaration)
  // Likelihood
  for (i in 1:n_data) {
    y[i] ~ normal(X[i, 1] * alpha + X[i, 2] * beta, sigma);
  }
}
'

# Fitting the model (standata must be a list containing n_data, n_cat, X and y)
stanfit <- stan(model_code = linear_regression_BF, data = standata, chains = 4,
                cores = parallel::detectCores(), iter = 20000, thin = 4)

# Function for calculating the Bayes factor via the Savage-Dickey density ratio
BF_directed <- function(sample) {
  delta.posterior <- rstan::extract(sample)$delta
  # Directed hypothesis: delta is positive, so the density is bounded below at 0
  fit.posterior <- logspline(delta.posterior, lbound = 0)
  # 95% credible interval of delta (not used in the Bayes factor itself):
  x0 <- qlogspline(0.025, fit.posterior)
  x1 <- qlogspline(0.975, fit.posterior)
  # Posterior and prior density at delta = 0
  posterior <- dlogspline(0, fit.posterior)
  prior <- 2 * dcauchy(0, scale = 1)  # half-Cauchy(0, 1) ordinate at 0; the scale must match the delta prior in the Stan model
  BF01 <- posterior / prior           # evidence for H0 (delta = 0)
  BF01
}

# Calculating the Bayes factor
BF <- BF_directed(stanfit)
1 / BF  # evidence for H1 (delta > 0)
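
# --------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original analysis).
# The script above assumes an object 'standata' that is not defined here; the
# Stan data block implies it must be a list with n_data, n_cat, X and y. The
# simulated data below further assume that the first column of X is a column
# of ones and the second column is the predictor of interest; adapt this to
# the actual data set. To run the script end-to-end, a list of this form has
# to be created before the stan() call above.
set.seed(2024)
n     <- 100
x     <- rnorm(n)                     # illustrative predictor
y_sim <- 0.5 + 0.4 * x + rnorm(n)     # illustrative outcome with a positive effect
standata <- list(
  n_data = n,
  n_cat  = 2,
  X      = cbind(1, x),               # column 1: ones, column 2: predictor
  y      = y_sim
)
# After fitting, convergence can be inspected before computing the Bayes factor, e.g.:
# print(stanfit, pars = c("alpha", "beta", "delta", "sigma"))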