
R Functions for Ridge Generalized Least Squares.R

#This R program implements the two ridge GLS methods (rGLS_I and rGLS_D) proposed in the paper "More efficient parameter estimates for factor analysis of ordinal variables by ridge generalized least squares," published in the British Journal of Mathematical and Statistical Psychology. doi: 10.1111/bmsp.12098
#This R program is written and commented by Ge Jiang, University of Notre Dame, Sep. 23, 2016

## =======================================================================================##
## Real data set: Humor (downloaded from http://personality-testing.info/_rawdata/)
## n=993, p=14, four-factor model (nfac=4), ordinal data with 5 categories (c=5);
## v.list: assignments of the variables to the factors;
## e.g., v.list=c(2,3,4) implies a three-factor analytic model where the first 2 variables load on the first factor, the next 3 variables load on the second factor, and the last 4 variables load on the third factor.
## a_I and a_D are predicted by an empirical regression equation; see the article for more details;
## =======================================================================================##
humor = read.table('Humor.dat')
ind = c(1,5,6,7,9,11,12,15,17,20,24,27,28,32) #subset of the original data
X = humor[,ind]

n = nrow(X); p = ncol(X); c = 5; nfac = 4; npar = p+nfac*(nfac-1)/2; v.list=c(4,4,3,3)
l_I = -3.2313 + 114.8180/n - 8.2775*p/n - 2.1215*log(p/n) + 1.1947*log(c/n)
l_D = -3.5838 + 417.0143/n - 40.9064*p/n - 1.4435*log(p/n) + 0.5618*log(c/n)
a_I = exp(l_I)/(1+exp(l_I));
a_D = exp(l_D)/(1+exp(l_D));

nrep = 500 #number of bootstrap replications
seed.bank = sample(1:1E+5, nrep) #pre-set random seeds

boot.humor = function(b, boot=TRUE){
set.seed(seed.bank[b])
dat.b = X[sample(1:n,n,replace = boot),]
Mat = get.RGamma(dat.b)
R = Mat$R #Estimated polychoric correlation matrix
Omega = Mat$GammaRho #Estimated Gamma matrix
if (any(is.na(R)) | max(abs(R))>1 | is.na(det(Omega))) {return(paste('the', b, 'th bootstrap sample does not converge'))}

theta_est = matrix(NA, 5, npar+4); SE_est = matrix(NA, 5, npar+4); Stat_est = matrix(NA, 5, 12)

#ULS, DWLS, GLS, rGLS, rdGLS
run_est = FisherGLS(dat.b, R, Omega, v.list=v.list, type='uls', a=1)
theta_est[1,] = run_est$theta0; SE_est[1,] = run_est$SE; Stat_est[1,] = run_est$stat

run_est = FisherGLS(dat.b, R, Omega, v.list=v.list, type='dwls', a=1)
theta_est[2,] = run_est$theta0; SE_est[2,] = run_est$SE; Stat_est[2,] = run_est$stat

run_est = FisherGLS(dat.b, R, Omega, v.list=v.list, type='gls', a=0)
theta_est[3,] = run_est$theta0; SE_est[3,] = run_est$SE; Stat_est[3,] = run_est$stat

run_est = FisherGLS(dat.b, R, Omega, v.list=v.list, type='rgls', a=a_I)
theta_est[4,] = run_est$theta0; SE_est[4,] = run_est$SE; Stat_est[4,] = run_est$stat

run_est = FisherGLS(dat.b, R, Omega, v.list=v.list, type='rdwls', a=a_D)
theta_est[5,] = run_est$theta0; SE_est[5,] = run_est$SE; Stat_est[5,] = run_est$stat

return(list(final = cbind(theta_est, SE_est[,-c(1:4)], Stat_est[,-c(1:4)])))
}

# By setting the boot option to FALSE, boot.humor outputs parameter estimates for the original data set
humor.ori=boot.humor(1, boot=FALSE)

## Use of parallel computing
library(parallel)
ncores = detectCores() - 1 #number of cores to use
boot.est <- mclapply(1:nrep, boot.humor, mc.cores = ncores)
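## Note: mclapply() does not support mc.cores > 1 on Windows; a socket cluster
## is a portable alternative (a sketch using the same boot.humor function):
# cl = makeCluster(ncores)
# clusterEvalQ(cl, library(mvtnorm))
# clusterExport(cl, c('X','n','npar','seed.bank','v.list','a_I','a_D','boot.humor',
#                     'get.RGamma','fisher.polychor','FisherGLS','sigdsig',
#                     'phi_pdf','phi_cdf','pi_to_p'))
# boot.est = parLapply(cl, 1:nrep, boot.humor)
# stopCluster(cl)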

## =======================================================================================##
## Subroutine to compute standard bivariate normal pdf;
##
## s/t: category indices of variables V_j and V_k, respectively
## rho: polychoric correlation p_jk
## tau: a list containing thresholds of V_j and V_k
## =======================================================================================##
library(mvtnorm)
phi_pdf = function(s, t, rho, tau)
{
n1 = length(tau[[1]])+1; n2 = length(tau[[2]])+1;
if (s==0|s==n1|t==0|t==n2) {p = 0} else {
p = dmvnorm(c(tau[[1]][s], tau[[2]][t]), mean = c(0,0), sigma = matrix(c(1, rho, rho, 1), 2))}
return(p)
}
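## Quick check (illustrative): with rho=0 and a single threshold at 0 for both
## variables, the density at the origin equals dnorm(0)^2 = 1/(2*pi), about 0.1592
# phi_pdf(1, 1, 0, list(0, 0))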

## =======================================================================================##
## Subroutine to compute standard bivariate normal cdf;
## pi_jk^st in equation (4) of the article;
## =======================================================================================##
phi_cdf = function(s, t, rho, tau)
{
tau[[1]] = c(-Inf, tau[[1]], Inf); tau[[2]] = c(-Inf, tau[[2]], Inf)
p = pmvnorm(lower = c(tau[[1]][s],tau[[2]][t]), upper = c(tau[[1]][s+1],tau[[2]][t+1]), mean = c(0,0), sigma = matrix(c(1, rho, rho, 1), 2))
return(p)
}
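## Quick check (illustrative): with rho=0 and a single threshold at 0 for both
## variables, each of the four cells has probability 0.25
# phi_cdf(1, 1, 0, list(0, 0))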

## =======================================================================================##
## 1st order derivative of phi_cdf to p_jk; ## d(phi_cdf)/d(p_jk)
## =======================================================================================##
pi_to_p = function(s, t, rho, tau)
{
pi_st_to_p_jk = phi_pdf(s, t, rho, tau) - phi_pdf(s, t-1, rho, tau) - phi_pdf(s-1, t, rho, tau) + phi_pdf(s-1, t-1, rho, tau)
return(pi_st_to_p_jk)
}
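## Numerical check (illustrative): the analytic derivative should agree with a
## central finite difference of phi_cdf with respect to rho
# rho = 0.3; tau = list(c(-0.5, 0.5), c(0, 1))
# pi_to_p(2, 2, rho, tau)
# (phi_cdf(2, 2, rho+1e-6, tau) - phi_cdf(2, 2, rho-1e-6, tau))/2e-6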

## =======================================================================================##
## Subroutine to estimate polychoric correlation matrix (R) based on pseudo maximum likelihood;
## (Gong & Samaniego, 1981);
## dat: categorical data set; hat.tau: a list of estimated thresholds for all variables
## =======================================================================================##
fisher.polychor = function(dat, hat.tau)
{
cat('Compute Polychoric Correlation...', '\n')
n = nrow(dat); p = ncol(dat);

# Number of categories for each variable
ncat = unname(apply(dat, 2, function(X) length(unique(X))))

ep = 1E-5 # Convergence criterion
itr = 50 # Max number of iterations
idiv = 0 # nonconvergence (0: converged; 1:non-converged)

R = diag(1, p)

scorr = cor(dat) #use Pearson's correlations as initial values

for (j in 1 : (p-1)){
for (k in (j+1) : p){

p_jk = scorr[j,k]
tau = hat.tau[c(j,k)]
ncount = table(dat[,j], dat[,k])
#empty cells in the contingency table: add a tiny number
ncount[ncount<0.1] = 1/ncat[j]/ncat[k]
E_l_dotdot_p_jk = l_dot_p_jk = pii_st = matrix(NA, ncat[j], ncat[k])

for (i in 1 : itr){
for (s in 1 : ncat[j]){
for (t in 1 : ncat[k]){

pii_st[s,t] = phi_cdf(s, t, p_jk, tau)
l_dot_p_jk[s,t] = ncount[s,t] * pi_to_p(s, t, p_jk, tau) / pii_st[s,t]
E_l_dotdot_p_jk[s,t] = (pi_to_p(s, t, p_jk, tau)^2) / pii_st[s,t] }}

l_dot = sum(l_dot_p_jk) # first order derivative
E_l_dotdot = -n * sum(E_l_dotdot_p_jk) # expectation of the second order derivative

dt = l_dot / E_l_dotdot
p_jk = p_jk - dt

if (abs(dt)<ep) break
if (abs(p_jk)>1) break
}
if (i==itr|abs(p_jk)>1) cat('Cannot estimate polychoric correlation for V', j, ' and V', k, ' using Fisher scoring', '\n', sep='')
if (i==itr|abs(p_jk)>1) break
R[j,k] = R[k,j] = p_jk
}}
return(R)
}
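## Cross-check (illustrative; assumes the polycor package is installed): the
## Fisher-scoring estimate should closely agree with polycor's ML estimate
# library(polycor)
# taus = lapply(1:2, function(j) head(qnorm(cumsum(table(X[,j])/n)), -1))
# fisher.polychor(X[,1:2], taus)[1,2]
# polychor(X[,1], X[,2], ML = TRUE)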

## =======================================================================================##
## Subroutine to estimate Gamma matrix for vech(R);
## =======================================================================================##
get.RGamma = function(dat)
{
# test if the data set contains continuous variables or variables with only one observed category
cont = apply(dat, 2, function(X) sum(abs(X-round(X))))
if (any(cont!=0)){
stop('data set contains continuous variables: ', paste0('V', which(cont!=0), collapse=','))
}
Vari = apply(dat, 2, var)
if (any(Vari==0)){
stop('data set contains variables with zero variance: ', paste0('V', which(Vari==0), collapse=','))
}
cat('All clear!', '\n')

n = nrow(dat); p = ncol(dat);
dat = as.matrix(dat, n, p)
# Number of categories for each variable
ncat = unname(apply(dat, 2, function(X) length(unique(X))))
# Category labels used by each variable
catt = lapply(1:p, function(X) sort(unique(dat[,X])))

# estimated thresholds based on marginal frequencies
hat.tau = lapply(1:p, function(X) head(qnorm( cumsum(table(dat[,X])/n) ), -1))

# polychoric correlation matrix
R = fisher.polychor(dat, hat.tau)

# count the NAs in the polychoric correlation matrix
na = sum(is.na(R))
if (na>0 | max(abs(R))>1){stop('Correlation out of bounds')}

## ===================================================================##
## Estimating Gamma matrix using the estimating equation approach
## ===================================================================##
cat('Gamma Matrix Estimation...', '\n')

g3 = NULL
for(t in 1 : p) {
g3 = cbind(g3, sapply(1:(length(catt[[t]])-1), function(X) ifelse(dat[,t]<=catt[[t]][X], 1, 0)) - matrix(rep(pnorm(hat.tau[[t]]), n), n, byrow=T))
}

g6 = matrix(NA, n, p*(p-1)/2)
for (i in 1 : n){
ini = 0
for(j in 1 : (p-1) ){
a = match(dat[i,j], catt[[j]]);
for (k in (j+1) : p){
ini = ini + 1
b = match(dat[i,k], catt[[k]]); c = R[j,k]; d = hat.tau[c(j,k)]
g6[i,ini] = pi_to_p(a, b, c, d) / phi_cdf(a, b, c, d)
}}}

G = cbind(g3, g6)

B = matrix( rowMeans(apply(G, 1, function(X) as.matrix(X)%*%t(X))), ncol(G), byrow=T)

A33 = -diag(dnorm(unlist(hat.tau)), ncol(g3))

A63 = matrix(0, ncol(g6), ncol(g3))
A66 = matrix(0, ncol(g6), ncol(g6))
loc = list(); for (t in 1:p){loc[[t]]=seq((c(0,cumsum(ncat-1))[t]+1),cumsum(ncat-1)[t])}

ini = 0
for (j in 1 : (p-1)){
for (k in (j+1) : p){
ini = ini + 1
tau = hat.tau[c(j,k)]
taus = list(t1=c(-Inf, tau[[1]], Inf), t2=c(-Inf, tau[[2]], Inf))
rho = R[j,k]
store63_js_k = matrix(NA, ncat[j]-1, ncat[k]); store63_j_kt = matrix(NA, ncat[k]-1, ncat[j])
store66 = matrix(NA, ncat[j], ncat[k])

for (s in 1 : (ncat[j]-1)){
for (t in 1 : ncat[k]){
num1 = pi_to_p(s, t, rho, tau)
den1 = phi_cdf(s, t, rho, tau)
num2 = pi_to_p(s+1, t, rho, tau)
den2 = phi_cdf(s+1, t, rho, tau)
z1 = (taus[[2]][t+1]-rho*taus[[1]][s+1])/sqrt(1-rho^2)
z2 = (taus[[2]][t]-rho*taus[[1]][s+1])/sqrt(1-rho^2)
mul = (pnorm(z1)-pnorm(z2))*dnorm(taus[[1]][s+1])
store63_js_k[s,t] = (num1/den1 - num2/den2)*mul
}}

for (t in 1 : (ncat[k]-1)){
for (s in 1 : ncat[j]){
num3 = pi_to_p(s, t, rho, tau)
den3 = phi_cdf(s, t, rho, tau)
num4 = pi_to_p(s, t+1, rho, tau)
den4 = phi_cdf(s, t+1, rho, tau)
z3 = (taus[[1]][s+1]-rho*taus[[2]][t+1])/sqrt(1-rho^2)
z4 = (taus[[1]][s]-rho*taus[[2]][t+1])/sqrt(1-rho^2)
mul = (pnorm(z3)-pnorm(z4))*dnorm(taus[[2]][t+1])
store63_j_kt[t,s] = (num3/den3 - num4/den4)*mul
}}

for (s in 1 : ncat[j]){
for (t in 1 : ncat[k]){
num5 = pi_to_p(s, t, rho, tau)
den5 = phi_cdf(s, t, rho, tau)
store66[s,t] = num5^2/den5
}}

A63[ini,loc[[j]]] = -rowSums(store63_js_k)
A63[ini,loc[[k]]] = -rowSums(store63_j_kt)
A66[ini,ini] = -sum(store66)
#print(c(j,k,-rowSums(store63_js_k)))
#print(c(j,k,-rowSums(store63_j_kt)))
}}

## =================================================##
### Sandwich Covariance matrix A^(-1)BA’^(-1) ###
## =================================================##
ind = cumsum(c(ncol(g3), ncol(g6)))
A = matrix(0, ncol(G), ncol(G))
A[1:ind[1], 1:ind[1]] = A33
A[(ind[1]+1):ind[2], 1:ind[1]] = A63
A[(ind[1]+1):ind[2], (ind[1]+1):ind[2]] = A66
A.inv = solve(A)

Gamma = A.inv%*%B%*%t(A.inv)
GammaRho = Gamma[(ind[1]+1):ind[2], (ind[1]+1):ind[2]]
return(list(R=R, GammaRho=GammaRho))
}
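## Quick usage check (illustrative): GammaRho should be a symmetric matrix of
## order p*(p-1)/2, the same length as vech of the off-diagonal of R
# Mat = get.RGamma(X)
# dim(Mat$GammaRho)                         #pstar x pstar, pstar = p*(p-1)/2
# max(abs(Mat$GammaRho - t(Mat$GammaRho)))  #~0 up to numerical error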
## =======================================================================================##
## Subroutine to compute the derivative of vech(Sigma) with respect to the parameter vector;
## theta0: parameter estimates
## =======================================================================================##
sigdsig = function(theta0, v.list){
nvar = sum(v.list) #number of variables
nfac = length(v.list) #number of factors
lfac = nfac*(nfac-1)/2 #number of correlations between factors
sp = split(1:nvar, rep(1:nfac,v.list)) #Assignments of variables

# factor loadings lambda given the input theta0
lamb = matrix(0, nvar, nfac)
for (j in 1 : nfac){
lamb[sp[[j]], j] = theta0[sp[[j]]]
}
#factor correlations phi given the input theta0
phi = diag(1, nfac)
phi[lower.tri(phi, diag=F)] = theta0[(nvar+1) : (nvar+lfac)]
phi = phi + t(phi) - diag(1, nfac)

# model-implied covariance matrix
Sigma0 = lamb%*%phi%*%t(lamb)
diag(Sigma0) = 1
vsig0 = Sigma0[lower.tri(Sigma0, diag=F)]

phl = phi%*%t(lamb)
lph = lamb%*%phi
dsigma = array(0, dim = c(nvar, nvar, length(theta0)))
dl = dps = matrix(0, length(vsig0), nvar)
dph = matrix(0, length(vsig0), lfac)

# derivatives with respect to factor loadings
for (i in 1 : nvar){
tt = matrix(0, nvar, nfac)
col = rep(1:nfac,v.list)[i]
tt[i, col] = 1
ttt = tt%*%phl+lph%*%t(tt)
dsigma[,,i] = ttt
dl[,i] = ttt[lower.tri(ttt, diag=F)]
}

# derivatives with respect to factor correlations
for (i in 1 : lfac){
tt = diag(0, nfac)
tt[lower.tri(tt, diag=F)][i] = 1
tt = tt + t(tt)
ttt = lamb%*%tt%*%t(lamb)
dsigma[,,(nvar+i)] = ttt
dph[,i] = ttt[lower.tri(ttt, diag=F)]
}

# derivatives with respect to error variances (not needed here: the diagonal of Sigma0 is fixed at 1)
# for (i in 1 : nvar){
# tt = diag(0, nvar)
# tt[i,i] = 1
# dsigma[,,(nvar+lfac+i)] = tt
# dps[,i] = tt[lower.tri(tt, diag=F)]
#}
#DS0 = cbind(dl,dph,dps)
DS0 = cbind(dl,dph)
return(list(Sigma0 = Sigma0, vsig0 = vsig0, dsigma = dsigma, DS0 = DS0))
}
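## Numerical check (illustrative): each column of DS0 should match a forward
## finite difference of vsig0 with respect to the corresponding parameter
# th = c(rep(0.7, 14), rep(0.5, 6)); h = 1e-6; i = 1
# thp = th; thp[i] = thp[i] + h
# max(abs((sigdsig(thp, c(4,4,3,3))$vsig0 - sigdsig(th, c(4,4,3,3))$vsig0)/h
#         - sigdsig(th, c(4,4,3,3))$DS0[,i]))  #~0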
## =======================================================================================##
## Subroutine to obtain parameter estimates of the factor analytic model using Fisher Scoring;
## Output includes parameter estimates, sandwich-type SEs, and test statistics;
## Statistics include regular, rescaled, and adjusted GLS statistics;
##
## R: polychoric correlation matrix;
## Omega: estimated Gamma matrix from get.RGamma function
## v.list: assignments of the variables to the factors;
## e.g., v.list=c(2,3,4) implies a three-factor analytic model where the first 2 variables load on the first factor, the next 3 variables load on the second factor, and the last 4 variables load on the third factor.
## type: estimation method used in the analysis (default 'gls')
## a: the value of a used in the ridge methods (default 0)
## =======================================================================================##
FisherGLS = function(dat, R, Omega, v.list, type='gls', a=0){
n = nrow(dat); p = ncol(dat); dat = as.matrix(dat, n, p)
nfac = length(v.list);

## Estimation Option
## Middle term in equation (6) of the article
if (type=='uls') {Gamma = diag(1, ncol(Omega))
} else if (type=='dwls') {Gamma = diag(diag(Omega), ncol(Omega))
} else if (type=='rdwls') {Gamma = (1-a)*Omega + a*diag(diag(Omega), ncol(Omega))
} else if (type=='rgls') {Gamma = (1-a)*Omega + a*diag(1, ncol(Omega))
} else {Gamma = Omega}
## ===========================================

InvGamma = solve(Gamma)
theta0 = c(rep(0.7, p), rep(0.5, nfac*(nfac-1)/2)) #initial values
vs = R[lower.tri(R, diag=F)] #vector of all polychoric correlations

pstar = p*(p-1)/2
q = length(theta0)
df = pstar-q

ep = 1E-4 # convergence criterion
ep1 = 1E-15 #criterion to determine matrix singularity
itr = 150 #Max number of iterations
idiv = 0 # nonconvergence (0: converged; 1:non-converged)

for (i in 1 : itr){
deriv = sigdsig(theta0, v.list)
DS0 = deriv$DS0 #rho dot
Sigma = deriv$Sigma0 #model-implied cov matrix

dswe = t(DS0)%*%InvGamma
dwd = dswe%*%DS0 #negative second order derivative
if (1/kappa(dwd)<ep1){idiv = idiv+1}
if (1/kappa(dwd)<ep1) break
stdi = solve(dwd)
vsig = Sigma[lower.tri(Sigma, diag=F)]
eresdu = vs-vsig

ddval = eigen(dwd)$values
if (ddval[q]<ep1){idiv = idiv+1}
if (ddval[q]<ep1) break

delt = stdi%*%dswe%*%eresdu
theta0 = theta0 + delt

dt = max(abs(delt))

if(dt<ep) break
}

##Recalculating with final theta0
deriv = sigdsig(theta0, v.list)
DS0 = deriv$DS0
dswe = t(DS0)%*%InvGamma
dwd = dswe%*%DS0
if (1/kappa(dwd)>ep1){stdi = solve(dwd)}
Sigma = deriv$Sigma0
vsig = Sigma[lower.tri(Sigma, diag=F)]
eresdu = vs-vsig

##calculate sandwich-type se with information matrix
Var = stdi%*%dswe%*%Omega%*%t(dswe)%*%stdi
SE = sqrt(diag(Var)/(n-1))

##### Statistics start here

#GLS statistics
f_GLS = t(eresdu)%*%InvGamma%*%eresdu
#T_GLS = (n-1)*f_GLS
T_GLS = n*f_GLS
p_GLS = 1-pchisq(T_GLS, df)

#Rescaled and adjusted GLS statistics
Vmat = InvGamma-t(dswe)%*%stdi%*%dswe
VO = Vmat%*%Omega
tVO = sum(diag(VO))
VO2 = VO%*%VO
tVO2 = sum(diag(VO2))
cst = tVO/df

T_RGLS = T_GLS/cst
p_RGLS = 1-pchisq(T_RGLS, df)
cst_1 = tVO2/tVO
cst_2 = (tVO*tVO)/tVO2
T_AGLS = T_GLS/cst_1
p_AGLS = 1-pchisq(T_AGLS, cst_2)

theta0 = rbind(n, p, nfac, a, theta0); SE = c(n, p, nfac, a, SE);
stat = matrix(c(n,p,nfac,a,T_GLS,p_GLS,T_RGLS,p_RGLS,T_AGLS,cst_2,p_AGLS,idiv),1)
colnames(stat) = c('n','p','nfac','a','T_gls','p_gls','T_rgls','p_rgls','T_agls','df_agls','p_agls','noncv')

return(list(theta0 = theta0, SE = SE, stat = stat))
}
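## Direct usage (illustrative): fit the four-factor model to the original data
## with the ridge GLS method rGLS_I
# Mat = get.RGamma(X)
# fit = FisherGLS(X, Mat$R, Mat$GammaRho, v.list=v.list, type='rgls', a=a_I)
# fit$stat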
##

Four New Corrected Statistics for SEM (R)

This file contains an R program to obtain the test statistics proposed in the paper "Four New Corrected Statistics for SEM With Small Samples and Nonnormally Distributed Data".

To cite this article, please use:

Jiang, G., & Yuan, K.-H. (2016). Four New Corrected Statistics for SEM With Small Samples and Nonnormally Distributed Data. Structural Equation Modeling: A Multidisciplinary Journal, 1–16. doi: 10.1080/10705511.2016.1277726

Authors: Ge Jiang and Ke-Hai Yuan

##=======================================================================================##
## This program requires as input the likelihood ratio statistic and Satorra-Bentler's mean-rescaled statistic,
## taken from the standard output of any major statistical package (Mplus, SAS, EQS, R: lavaan)
##=======================================================================================##

# Inputs to be supplied by the user:
# T_ml: the likelihood ratio statistic
# T_rml: Satorra-Bentler's mean-rescaled statistic
# df: degrees of freedom of the model
# T_aml, df_aml: Satorra-Bentler's mean-and-variance-adjusted statistic and its degrees of freedom
# n: sample size (used below to bound the rank of the UGamma matrix)
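# A sketch of how these inputs could be pulled from lavaan (the model string
# 'model' and data set 'mydata' are hypothetical placeholders; 'MLM' yields the
# mean-rescaled statistic, 'MLMVS' the mean-and-variance-adjusted one):
# library(lavaan)
# fit1 = cfa(model, data = mydata, estimator = 'MLM')
# T_ml = fitMeasures(fit1, 'chisq'); T_rml = fitMeasures(fit1, 'chisq.scaled')
# df = fitMeasures(fit1, 'df'); n = lavInspect(fit1, 'nobs')
# fit2 = cfa(model, data = mydata, estimator = 'MLMVS')
# T_aml = fitMeasures(fit2, 'chisq.scaled'); df_aml = fitMeasures(fit2, 'df.scaled')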

# Scaling factor in T_rml:
r_1 = T_ml/T_rml
# trace of the UGamma matrix:
tr = r_1*df
# rank of the UGamma matrix:
rk = min(df, n-1)

# First Corrected Statistic
const1 = tr/rk
T_cor1 = T_ml/const1
p_cor1 = 1-pchisq(T_cor1, df)
# Second Corrected Statistic
const2 = (r_1+const1)/2
T_cor2 = T_ml/const2
p_cor2 = 1-pchisq(T_cor2, df)
# Third Corrected Statistic
T_cor3 = (T_rml+T_cor1)/2
p_cor3 = 1-pchisq(T_cor3, df)
# Fourth Corrected Statistic
p_rml = 1-pchisq(T_rml, df)
p_aml = 1-pchisq(T_aml, df_aml)
p_cor4 = (p_rml+p_aml)/2
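## For convenience, the steps above could be collected into one helper function
## (a sketch, not part of the original program):
# four.corrected = function(T_ml, T_rml, T_aml, df, df_aml, n){
#   r_1 = T_ml/T_rml; tr = r_1*df; rk = min(df, n-1)
#   T_cor1 = T_ml/(tr/rk)              #first corrected statistic
#   T_cor2 = T_ml/((r_1 + tr/rk)/2)    #second corrected statistic
#   T_cor3 = (T_rml + T_cor1)/2        #third corrected statistic
#   c(p_cor1 = 1-pchisq(T_cor1, df), p_cor2 = 1-pchisq(T_cor2, df),
#     p_cor3 = 1-pchisq(T_cor3, df),
#     p_cor4 = (1-pchisq(T_rml, df) + 1-pchisq(T_aml, df_aml))/2)
# }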