Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

add new functions for group means and correlations #10

Merged
merged 2 commits into from
Jun 7, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,9 @@ Manuscript/
1_IGT_PP/Code/R/1_Analyses/PA_PANAS_simple_IRT.Rmd
1_IGT_PP/Code/R/2_Plotting/posterior_predictive_checks_joint_indMu.Rmd
1_IGT_PP/Code/R/1_Analyses/fitting_orl_joint_indMu.Rmd
1_IGT_PP/Code/Stan/self_report_mus.stan
1_IGT_PP/Code/R/1_Analyses/selfreport_indMu.RMD


#
Icon?
Expand Down
2 changes: 1 addition & 1 deletion 1_IGT_PP/Code/R/1_Analyses/fitting_orl_joint.Rmd
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ library(miscTools)

## Load Data
```{r}
stan_data = readRDS(here("1_IGT_PP", "Data", "1_Preprocessed", "List_Sess1&2.rds"))
stan_data = readRDS(here("1_IGT_PP", "Data", "1_Preprocessed", "stan_ready_ORL_joint_retest.rds"))
full_data = readRDS(here("1_IGT_PP", "Data", "1_Preprocessed", "Dataframe_Sess1&2.rds"))
```

Expand Down
243 changes: 134 additions & 109 deletions 1_IGT_PP/Code/Stan/igt_orl_joint.stan
Original file line number Diff line number Diff line change
@@ -1,10 +1,54 @@
functions {
// Compute the empirical (sample) correlation matrix of the columns of X.
// X is an N x M matrix (N observations, M variables); returns an M x M
// correlation matrix built from the unbiased sample covariance.
// NOTE(review): assumes N > 1 and non-constant columns (std_devs > 0) — no guards here.
matrix empirical_correlation(matrix X) {
int N = rows(X);
int M = cols(X);
matrix[N, M] centered;
vector[M] std_devs;
matrix[M, M] Sigma;
matrix[M, M] R;

// calculate covariance matrix: center each column at its mean, then
// form the unbiased sample covariance (divide by N - 1)
for (j in 1:M) {
centered[,j] = X[,j] - mean(X[,j]);
}
Sigma = (centered' * centered) / (N - 1);

// Calculate correlation matrix: rescale each covariance by the product
// of the two columns' standard deviations
std_devs = sqrt(diagonal(Sigma));
for (i in 1:M) {
for (j in 1:M) {
R[i,j] = Sigma[i,j] / (std_devs[i] * std_devs[j]);
}
}

return R;
}

// Monte-Carlo estimate of the group mean on the transformed scale:
// E[Phi_approx(x)] for x ~ Normal(mu, sigma), averaged over n_samples draws.
// Used because Phi_approx(mu) alone ignores the group SD when recovering
// the mean of the transformed (bounded) parameter.
real Phi_approx_group_mean_rng(real mu, real sigma, int n_samples) {
vector[n_samples] par;
for (i in 1:n_samples) {
par[i] = Phi_approx(normal_rng(mu, sigma));
}
return mean(par);
}

// Monte-Carlo estimate of the correlation matrix on the transformed scale:
// draws n_samples vectors from MultiNormal(mu, Sigma) — with Sigma rebuilt
// from the correlation matrix R and scale vector sigma via quad_form_diag —
// applies Phi_approx elementwise, and returns the empirical correlation of
// the transformed draws.
// NOTE(review): R must be square (M == rows(R) == cols(R)) and mu/sigma of length M.
matrix Phi_approx_corr_rng(vector mu, vector sigma, matrix R, int n_samples) {
int N = rows(R);
int M = cols(R);
matrix[N,M] Sigma = quad_form_diag(R, sigma);
matrix[n_samples, M] X_sim;
for (i in 1:n_samples) {
X_sim[i,] = Phi_approx(multi_normal_rng(mu, Sigma)');
}
return empirical_correlation(X_sim);
}
}


// ---------------------------------------------------------------------------------------
data{
int<lower=1> N; // Number of participants =49
int<lower=1> S; // Number of sessions = 2
int<lower=1> T; // Total possible number of trials = 120
data {
int<lower=1> N; // Number of participants
int<lower=1> S; // Number of sessions
int<lower=1> T; // Total possible number of trials
int card[N,T,S]; // Cards presented on each trial
int Tsubj[N,S]; // Total number of trials presented to each subject on each session
int choice[N,T,S]; // Choices on each trial
Expand All @@ -13,10 +57,9 @@ data{
}


// ---------------------------------------------------------------------------------------
parameters{
parameters {
// Declare parameters
// Hyper(group)-parameters - these are mus & sigmas, respectively, for each parameter
// Hyper(group)-parameters
matrix[S, 5] mu_p; // S (number of sessions) x 5 (number of parameters) matrix of mus
vector<lower=0>[2] sigma_Arew;
vector<lower=0>[2] sigma_Apun;
Expand All @@ -25,10 +68,10 @@ parameters{
vector<lower=0>[2] sigma_betaP;

// Subject-level "raw" parameters - i.e., independent/uncorrelated & normally distributed person-level (random-)effects
// Note, these are S (number of sessions) x J (number of subjects) matrices
matrix[S,N] Arew_pr; // Untransformed Arews
matrix[S,N] Apun_pr; // Untransformed Apunss
matrix[S,N] K_pr; // Untransformed Ks
// Note, these are S (number of sessions) x N (number of subjects) matrices
matrix[S,N] Arew_pr;
matrix[S,N] Apun_pr;
matrix[S,N] K_pr;
matrix[S,N] betaF_pr;
matrix[S,N] betaP_pr;

Expand All @@ -40,16 +83,13 @@ parameters{
cholesky_factor_corr[2] R_chol_betaP;
}


// ---------------------------------------------------------------------------------------
transformed parameters{
// -------------------------------
transformed parameters {
// Declare transformed parameters
// Parameters to use in the reinforcement-learning algorithm
// Note, for each of these, we include the mus and the subject-level parameters (see below), allowing for shrinkage and subject-to-subject variability
matrix<lower=0,upper=1>[N,S] Arew; // Transformed Arews
matrix<lower=0,upper=1>[N,S] Apun; // Transformed Apuns
matrix<lower=0,upper=5>[N,S] K; // Transformed Ks
matrix<lower=0,upper=1>[N,S] Arew;
matrix<lower=0,upper=1>[N,S] Apun;
matrix<lower=0,upper=5>[N,S] K;
matrix[N,S] betaF;
matrix[N,S] betaP;

Expand All @@ -60,8 +100,7 @@ transformed parameters{
matrix[S,N] betaF_tilde;
matrix[S,N] betaP_tilde;

// -------------------------------
// Calculate transformed parameters
// Calculate transformed parameters

// Untransformed subject-level parameters incorporating correlation between sessions
Arew_tilde = diag_pre_multiply(sigma_Arew, R_chol_Arew) * Arew_pr;
Expand All @@ -87,11 +126,19 @@ transformed parameters{
}
}


// ---------------------------------------------------------------------------------------
model{
// -------------------------------
// Priors
model {
// Declare variables to calculate utility after each trial: These 4 (number of cards) x 2 (playing vs. not playing) matrices
matrix[4,2] ef;
matrix[4,2] ev;
matrix[4,2] pers;
matrix[4,2] utility;

real PEval;
real PEfreq;
real PEfreq_fic;
real K_tr;

// Priors
// Hyperparameters for correlations
R_chol_Arew ~ lkj_corr_cholesky(1);
R_chol_Apun ~ lkj_corr_cholesky(1);
Expand All @@ -115,32 +162,20 @@ model{
to_vector(betaF_pr) ~ normal(0, 1.0);
to_vector(betaP_pr) ~ normal(0, 1.0);

// Declare variables to calculate utility after each trial: These 4 (number of cards) x 2 (playing vs. not playing) matrices
matrix[4,2] ef;
matrix[4,2] ev;
matrix[4,2] pers;
matrix[4,2] utility;

real PEval;
real PEfreq;
real PEfreq_fic;
real K_tr;

// -------------------------------
for(i in 1:N){ // Loop through individual participants
for(s in 1:S){ // Loop though sessions for participant i
if(Tsubj[i,s] > 0){ // If we have data for participant i on session s, run through RL algorithm
for (i in 1:N) { // Loop through individual participants
for (s in 1:S) { // Loop though sessions for participant i
if (Tsubj[i,s] > 0) { // If we have data for participant i on session s, run through RL algorithm

// Initialize starting values
K_tr = pow(3, K[i,s]) - 1;
for(pp in 1:2){ // Looping over play/pass columns and assigning each 0 to each card
for (pp in 1:2) { // Looping over play/pass columns and assigning each 0 to each card
ev[:,pp] = rep_vector(0,4);
ef[:,pp] = rep_vector(0,4);
pers[:,pp] = rep_vector(0,4);
utility[:,pp] = rep_vector(0,4);
}

for(t in 1:Tsubj[i,s]){ // Run through RL algorithm trial-by-trial
for (t in 1:Tsubj[i,s]) { // Run through RL algorithm trial-by-trial

// Likelihood - predict choice as a function of utility
choice[i,t,s] ~ categorical_logit(to_vector(utility[card[i,t,s], :]));
Expand All @@ -150,7 +185,7 @@ model{
PEfreq = sign[i,t,s] - ef[card[i,t,s], choice[i,t,s]]; // Win-frequency prediction error
PEfreq_fic = -sign[i,t,s]/1 - ef[card[i,t,s], (3-choice[i,t,s])]; // Lose-frequency prediction error?

if(outcome[i,t,s] >= 0){ // If participant DID NOT lose
if (outcome[i,t,s] >= 0) { // If participant DID NOT lose
// Update expected win-frequency of what participant DID NOT chose to do
ef[card[i,t,s], (3-choice[i,t,s])] = ef[card[i,t,s], (3-choice[i,t,s])] + Apun[i,s] * PEfreq_fic;
// Update what participant chose
Expand All @@ -176,11 +211,18 @@ model{
}
}

generated quantities {
// Hyper(group)-parameters - these are 5 (number of parameters) x S (number of sessions) matrix of mus & sigmas, respectively, for each parameter
vector<lower=0,upper=1>[2] mu_Arew;
vector<lower=0,upper=1>[2] mu_Apun;
vector<lower=0,upper=5>[2] mu_K;
vector[2] mu_betaF;
vector[2] mu_betaP;

// For posterior predictive check
real choice_pred[N,T,S];

// ---------------------------------------------------------------------------------------
generated quantities { // STILL NEED TO FIGURE OUT HOW EXACTLY THIS BLOCK WORKS, PARTICULARLY WITH THE PPC
// ------------------------------------
// test-retest correlations
// test-retest correlations
corr_matrix[2] R_Arew;
corr_matrix[2] R_Apun;
corr_matrix[2] R_K;
Expand All @@ -189,46 +231,31 @@ generated quantities { // STILL NEED TO FIGURE OUT HOW EXACTLY THIS BLOCK WORKS,

// Reconstruct correlation matrix from cholesky factor
// Note that we're multipling the cholesky factor by its transpose which gives us the correlation matrix
R_Arew = R_chol_Arew * R_chol_Arew';
R_Apun = R_chol_Apun * R_chol_Apun';
R_K = R_chol_K * R_chol_K';
R_Arew = Phi_approx_corr_rng(mu_p[,1], sigma_Arew, R_chol_Arew * R_chol_Arew', 10000);
R_Apun = Phi_approx_corr_rng(mu_p[,2], sigma_Apun, R_chol_Apun * R_chol_Apun', 10000);
R_K = Phi_approx_corr_rng(mu_p[,3], sigma_K, R_chol_K * R_chol_K', 10000);
R_betaF = R_chol_betaF * R_chol_betaF';
R_betaP = R_chol_betaP * R_chol_betaP';

// ------------------------------------
// For posterior predictive check?
// Hyper(group)-parameters - these are 5 (number of parameters) x S (number of sessions) matrix of mus & sigmas, respectively, for each parameter
vector<lower=0,upper=1>[2] mu_Arew;
vector<lower=0,upper=1>[2] mu_Apun;
vector<lower=0,upper=5>[2] mu_K;
vector[2] mu_betaF;
vector[2] mu_betaP;

// For posterior predictive check
real choice_pred[N,T,S];

// Set all posterior predictions to -1 (avoids NULL values)
for(i in 1:N) {
for(s in 1:S){
for(t in 1:T){
for (i in 1:N) {
for (s in 1:S) {
for (t in 1:T) {
choice_pred[i,t,s] = -1;
}
}
}

// Group-level means
for(s in 1:S){
mu_Arew[s] = Phi_approx(mu_p[s, 1]);
mu_Apun[s] = Phi_approx(mu_p[s, 2]);
mu_K[s] = Phi_approx(mu_p[s, 3]) * 5;
// Compute group-level means
for (s in 1:S) {
mu_Arew[s] = Phi_approx_group_mean_rng(mu_p[s, 1], sigma_Arew[s], 10000);
mu_Apun[s] = Phi_approx_group_mean_rng(mu_p[s, 2], sigma_Apun[s], 10000);
mu_K[s] = Phi_approx_group_mean_rng(mu_p[s, 3], sigma_K[s], 10000) * 5;
mu_betaF[s] = mu_p[s, 4];
mu_betaP[s] = mu_p[s, 5];
}

// ------------------------------------
{ // local section, this saves time and space
// -------------------------------

// Declare variables to calculate utility after each trial: These 4 (number of cards) x 2 (playing vs. not playing) matrices
matrix[4,2] ef;
matrix[4,2] ev;
Expand All @@ -240,50 +267,48 @@ generated quantities { // STILL NEED TO FIGURE OUT HOW EXACTLY THIS BLOCK WORKS,
real PEfreq_fic;
real K_tr;

// -------------------------------
for(i in 1:N){ // Loop through individual participants
for(s in 1:S){ // Loop though sessions for participant i
if(Tsubj[i,s] > 0){ // If we have data for participant i on session s, run through RL algorithm
for (i in 1:N) { // Loop through individual participants
for (s in 1:S) { // Loop though sessions for participant i
if (Tsubj[i,s] > 0) { // If we have data for participant i on session s, run through RL algorithm

// Initialize starting values
K_tr = pow(3, K[i,s]) - 1;
for(pp in 1:2){ // Looping over play/pass columns and assigning each 0 to each card
for (pp in 1:2) { // Looping over play/pass columns and assigning each 0 to each card
ev[:,pp] = rep_vector(0,4);
ef[:,pp] = rep_vector(0,4);
pers[:,pp] = rep_vector(0,4);
utility[:,pp] = rep_vector(0,4);
}

for(t in 1:Tsubj[i,s]){ // Run through RL algorithm trial-by-trial
for (t in 1:Tsubj[i,s]) { // Run through RL algorithm trial-by-trial
// Likelihood - predict choice as a function of utility
choice_pred[i,t,s] = categorical_rng(softmax(to_vector(utility[card[i,t,s], :])));

// Likelihood - predict choice as a function of utility
choice_pred[i,t,s] = categorical_rng(softmax(to_vector(utility[card[i,t,s], :])));

// After choice, calculate prediction error
PEval = outcome[i,t,s] - ev[card[i,t,s], choice[i,t,s]]; // Value prediction error
PEfreq = sign[i,t,s] - ef[card[i,t,s], choice[i,t,s]]; // Win-frequency prediction error
PEfreq_fic = -sign[i,t,s]/1 - ef[card[i,t,s], (3-choice[i,t,s])]; // Lose-frequency prediction error?

if(outcome[i,t,s] >= 0){ // If participant DID NOT lose
// Update expected win-frequency of what participant DID NOT chose to do
ef[card[i,t,s], (3-choice[i,t,s])] = ef[card[i,t,s], (3-choice[i,t,s])] + Apun[i,s] * PEfreq_fic;
// Update what participant chose
ef[card[i,t,s], choice[i,t,s]] = ef[card[i,t,s], choice[i,t,s]] + Arew[i,s] * PEfreq;
ev[card[i,t,s], choice[i,t,s]] = ev[card[i,t,s], choice[i,t,s]] + Arew[i,s] * PEval;
} else { // If participant DID lose
// Update expected win-frequency of what participant DID NOT choose to do
ef[card[i,t,s], (3-choice[i,t,s])] = ef[card[i,t,s], (3-choice[i,t,s])] + Arew[i,s] * PEfreq_fic;
// Update what participant chose
ef[card[i,t,s], choice[i,t,s]] = ef[card[i,t,s], choice[i,t,s]] + Apun[i,s] * PEfreq;
ev[card[i,t,s], choice[i,t,s]] = ev[card[i,t,s], choice[i,t,s]] + Apun[i,s] * PEval;
}

// Update perseverance
pers[card[i,t,s], choice[i,t,s]] = 1;
pers = pers / (1 + K_tr);

// Calculate expected value of card
utility = ev + ef * betaF[i,s] + pers * betaP[i,s];
// After choice, calculate prediction error
PEval = outcome[i,t,s] - ev[card[i,t,s], choice[i,t,s]]; // Value prediction error
PEfreq = sign[i,t,s] - ef[card[i,t,s], choice[i,t,s]]; // Win-frequency prediction error
PEfreq_fic = -sign[i,t,s]/1 - ef[card[i,t,s], (3-choice[i,t,s])]; // Lose-frequency prediction error?

if (outcome[i,t,s] >= 0) { // If participant DID NOT lose
// Update expected win-frequency of what participant DID NOT chose to do
ef[card[i,t,s], (3-choice[i,t,s])] = ef[card[i,t,s], (3-choice[i,t,s])] + Apun[i,s] * PEfreq_fic;
// Update what participant chose
ef[card[i,t,s], choice[i,t,s]] = ef[card[i,t,s], choice[i,t,s]] + Arew[i,s] * PEfreq;
ev[card[i,t,s], choice[i,t,s]] = ev[card[i,t,s], choice[i,t,s]] + Arew[i,s] * PEval;
} else { // If participant DID lose
// Update expected win-frequency of what participant DID NOT choose to do
ef[card[i,t,s], (3-choice[i,t,s])] = ef[card[i,t,s], (3-choice[i,t,s])] + Arew[i,s] * PEfreq_fic;
// Update what participant chose
ef[card[i,t,s], choice[i,t,s]] = ef[card[i,t,s], choice[i,t,s]] + Apun[i,s] * PEfreq;
ev[card[i,t,s], choice[i,t,s]] = ev[card[i,t,s], choice[i,t,s]] + Apun[i,s] * PEval;
}

// Update perseverance
pers[card[i,t,s], choice[i,t,s]] = 1;
pers = pers / (1 + K_tr);

// Calculate expected value of card
utility = ev + ef * betaF[i,s] + pers * betaP[i,s];
}
}
}
Expand Down
Loading