## Multinomial processing tree (MPT) model, based on Bayen et al. (1996), model 6d.
## Source-monitoring design with three item types (subtrees): items presented
## complete, items presented halved, and new (unpresented) items. Each subtree
## yields counts in 3 response categories, modeled as multinomial.
##
## Parameter mapping (columns of theta[s, 1:P], one row per item s):
##   theta[s, 1] = D1  detection of complete items
##   theta[s, 2] = D2  detection of halved items
##   theta[s, 3] = d1  source discrimination for complete items
##   theta[s, 4] = d2  source discrimination for halved items
##   theta[s, 5] = a   conditional source guessing (with constraint g = a)
##   theta[s, 6] = b   guessing "old" for undetected items
## Model-6d constraints: D3 (detection of new items) = D1, and g = a.
## NOTE(review): the original header listed theta[s, 2] twice (as D2 and as
## "D3 = D1"); the code below reuses theta[s, 1] in the new-item subtree,
## which matches the D3 = D1 constraint — confirm against Bayen et al. (1996).
##
## Data nodes expected: tree_complete, tree_halved, tree_new (n_items x 3 count
## matrices), C, H, N (per-item trial totals), x1, x2 (item-level covariates),
## W (P x P Wishart scale matrix), n_items, P.

model{

  ## Loop over items
  for (s in 1:n_items){

    ## Subtree 1 (items shown COMPLETE): category probabilities.
    ## Cat 1 = detected & source recalled, or source guessed via (1-a) branches.
    Prob_C[s, 1] <- (theta[s, 1] * theta[s, 3]) + (theta[s, 1] * (1-theta[s, 3]) * (1-theta[s, 5])) + ((1-theta[s, 1]) * theta[s, 6] * (1-theta[s, 5]))
    Prob_C[s, 2] <- (theta[s, 1] * (1-theta[s, 3]) * theta[s, 5]) + ((1-theta[s, 1]) * theta[s, 6] * theta[s, 5])
    ## Cat 3 = undetected and guessed "new".
    Prob_C[s, 3] <- ((1-theta[s, 1]) * (1-theta[s, 6]))

    ## Likelihood for subtree 1 (C[s] = number of complete-item trials)
    tree_complete[s, 1:3] ~ dmulti(Prob_C[s, 1:3], C[s])

    ## Subtree 2 (items shown HALVED): category probabilities.
    ## Note the guessing branches use a where subtree 1 used (1-a):
    ## a is the probability of guessing the "halved" source.
    Prob_H[s, 1] <- (theta[s, 2] * theta[s, 4]) + (theta[s, 2] * (1-theta[s, 4]) * theta[s, 5]) + ((1-theta[s, 2]) * theta[s, 6] * theta[s, 5])
    Prob_H[s, 2] <- (theta[s, 2] * (1-theta[s, 4]) * (1-theta[s, 5])) + ((1-theta[s, 2]) * theta[s, 6] * (1-theta[s, 5]))
    Prob_H[s, 3] <- ((1-theta[s, 2]) * (1-theta[s, 6]))

    ## Likelihood for subtree 2 (H[s] = number of halved-item trials)
    tree_halved[s, 1:3] ~ dmulti(Prob_H[s, 1:3], H[s])

    ## Subtree 3 (NO presentation, NEW items): category probabilities.
    ## Detection of new items reuses theta[s, 1] (constraint D3 = D1).
    ## Cat 1 = correct rejection ("new"); cats 2 and 3 = false alarms with a
    ## guessed source.
    Prob_N[s, 1] <- theta[s, 1] + ((1- theta[s, 1]) *(1-theta[s, 6]))
    Prob_N[s, 2] <- (1- theta[s, 1]) *theta[s, 6] * (1-theta[s, 5])
    Prob_N[s, 3] <- ((1- theta[s, 1]) *theta[s, 6] * theta[s, 5])

    ## Likelihood for subtree 3 (N[s] = number of new-item trials)
    tree_new[s, 1:3] ~ dmulti(Prob_N[s, 1:3], N[s])

    ## Probit transformation: map unconstrained probit-scale parameters
    ## to probabilities via the standard-normal CDF.
    for(p in 1:P) {
      theta[s, p] <- phi(theta_probit[s, p])
    }

    ## Multivariate regression predicting the mean of each probit-scale
    ## parameter from item covariates x1, x2, plus a scaled item deviation
    ## (xi_item * delta_mu_raw: parameter-expanded / scaled random effect).
    for (p in 1:P){
      theta_probit[s, p] <- b0[p] + b1[p] * x1[s]+ b2[p] * x2[s]+ xi_item[p]*delta_mu_raw[s,p]
    }

    ## Deviation of the item's parameters from the group means
    ## (correlated across parameters via tau_prec_mu).
    delta_mu_raw[s, 1:P] ~ dmnorm(mu_delta_raw[1:P], tau_prec_mu[1:P,1:P])

    ## Expected counts per category, given the current parameter values
    pred_complete[s, 1:3] <- Prob_C[s, 1:3]*C[s]
    pred_halved[s, 1:3] <- Prob_H[s, 1:3]*H[s]
    pred_new[s, 1:3] <- Prob_N[s, 1:3]*N[s]

    ## Squared residuals of the OBSERVED counts vs. expected counts
    residualC[s, 1:3] <- tree_complete[s, 1:3] - pred_complete[s, 1:3]
    residualH[s, 1:3] <- tree_halved[s, 1:3] - pred_halved[s, 1:3]
    residualN[s, 1:3] <- tree_new[s, 1:3] - pred_new[s, 1:3]
    sqC[s, 1:3] <- pow(residualC[s, 1:3], 2)
    sqH[s, 1:3] <- pow(residualH[s, 1:3], 2)
    sqN[s, 1:3] <- pow(residualN[s, 1:3], 2)

    ## Posterior predictive replicated data sets from the fitted model
    postpred_complete[s, 1:3] ~ dmulti(Prob_C[s, 1:3], C[s])
    postpred_halved[s, 1:3] ~ dmulti(Prob_H[s, 1:3], H[s])
    postpred_new[s, 1:3] ~ dmulti(Prob_N[s, 1:3], N[s])

    ## Squared residuals of the REPLICATED counts vs. expected counts
    residualC_post[s, 1:3] <- postpred_complete[s, 1:3] - pred_complete[s, 1:3]
    residualH_post[s, 1:3] <- postpred_halved[s, 1:3] - pred_halved[s, 1:3]
    residualN_post[s, 1:3] <- postpred_new[s, 1:3] - pred_new[s, 1:3]
    sqC.post[s, 1:3] <- pow(residualC_post[s, 1:3], 2)
    sqH.post[s, 1:3] <- pow(residualH_post[s, 1:3], 2)
    sqN.post[s, 1:3] <- pow(residualN_post[s, 1:3], 2)
  }

  ## Priors for the scaling factors and regression weights (one per parameter).
  ## mu_delta_raw is fixed at 0 so item deviations are centered on the
  ## regression prediction.
  for (p in 1:P){
    xi_item[p] ~ dnorm(0,1)
    mu_delta_raw[p] <- 0
    b0[p] ~ dnorm(0,1)
    b1[p] ~ dnorm(0,1)
    b2[p] ~ dnorm(0,1)
  }

  ## Inverse Wishart: prior on the covariance matrix of the multivariate
  ## distribution of the item deviations (via a Wishart prior on the
  ## precision matrix; sigma_mu is the implied covariance, for monitoring).
  tau_prec_mu[1:P, 1:P] ~ dwish(W[1:P, 1:P], (P+1))
  sigma_mu[1:P, 1:P] <- inverse(tau_prec_mu[1:P, 1:P])

  ## Posterior predictive check: total squared discrepancy of observed (fit)
  ## vs. replicated (fit.new) data; test = 1 when the replicated discrepancy
  ## exceeds the observed one. The Bayesian p-value is the posterior mean of
  ## test across MCMC samples (monitor 'test' or 'bpvalue'; note mean() of the
  ## scalar 'test' within one iteration equals 'test' itself).
  fit <- sum(sqC[,1:3]) + sum(sqH[,1:3]) + sum(sqN[,1:3])
  fit.new <- sum(sqC.post[,1:3]) + sum(sqH.post[,1:3]) + sum(sqN.post[,1:3])
  test <- step(fit.new-fit)
  bpvalue <- mean(test)

## close model
}