Appendix F: R Code for Calculating Benchmarked Scale Scores

N O T E
For users wanting to preserve legacy trend lines: If you would like to generate scale scores as produced by the platform prior to the release of VM 3.0 in November 2017 (aka “legacy” scale scores), the EDSCLS Help Desk can send you the legacy R code. Please contact the Help Desk at edscls@air.org or (866) 730-6735.

##########INSTRUCTION###############################

#You only need to specify your data folder and csv dataset name in the "INPUTS" section below

#The output dataset will be produced in the folder you specify

#Comment: This program calculates scale scores for each survey taker. The calculated scale scores are just

#         additional analysis variables attached to your input dataset. If your survey has a complex sample

#         design (e.g., you took a sample from your target population with stratification, clustering, etc.),

#         you should apply your design specifications (strata, cluster, weights, etc.) as you would do when

#         analyzing other survey variables. (A short example using the R survey package appears after the end of this program.)

####################################################

##########INPUTS###############################

#Specify your data folder and csv dataset name
folder <- "FOLDER PATH"

dataname <- "student data.csv"

##########SCALING PROGRAM######################

#Option for decimal places
options(digits=10)

#Rounding function that rounds .5 up to the next integer
round2 = function(x, n) {
posneg = sign(x)
z = abs(x)*10^n
z = z + 0.5
z = trunc(z)
z = z/10^n
z*posneg
}

#Scale score calculation function
rasch <- function(scalelist=NULL){

#Stepvalues that are estimated from the national benchmarking. These are the values in Table A4 of the national benchmarking documentation
#(https://safesupportivelearning.ed.gov/sites/default/files/EDSCLS_Psychometric_Benchmarking_Technical_Report.pdf).
#These three sets of values are used to calculate the scale scores for any survey takers.

c0 <- c("SENGCLC1","SENGCLC2","SENGCLC3","SENGCLC4","SENGCLC7",
        "SENGREL9","SENGREL11","SENGREL12","SENGREL14","SENGREL153","SENGREL17","SENGREL20","SENGREL21","SENGREL29",
        "SENGPAR44","SENGPAR45","SENGPAR46","SENGPAR47","SENGPAR48",
        "SSAFEMO49","SSAFEMO52","SSAFEMO53","SSAFEMO54","SSAFEMO56","SSAFEMO57",
        "SSAFPSAF60","SSAFPSAF63","SSAFPSAF65","SSAFPSAF67","SSAFPSAF68","SSAFPSAF69","SSAFPSAF71",
        "SSAFBUL74","SSAFBUL75","SSAFBUL76","SSAFBUL77B","SSAFBUL73","SSAFBUL83",
        "SSAFSUB88","SSAFSUB91","SSAFSUB92","SSAFSUB93","SSAFSUB94",
        "SENVPENV100","SENVPENV102","SENVPENV105","SENVPENV106","SENVPENV107",
        "SENVINS111","SENVINS113","SENVINS114","SENVINS115","SENVINS121",
        "SENVMEN130","SENVMEN132","SENVMEN133","SENVMEN134","SENVMEN137",
        "SENVDIS142","SENVDIS143","SENVDIS146","SENVDIS147","SENVDIS147C")

c1 <- c(-0.88758,-1.21688,-1.70151,-1.13062,-1.59396,-0.9249,-1.6756,-1.37546,-1.19418,-1.35606,
        -1.32961,-0.77372,-1.21581,-1.45891,-1.02078,-1.15759,-0.83518,-1.57761,-1.54687,-1.19732,
        -0.79951,-1.15283,-0.652,-1.1754,-1.21302,-1.40832,-1.53905,-1.71611,-0.94029,-0.58,
        -1.03096,-0.72259,-0.98061,-1.18313,-0.94225,-1.00375,-0.83535,-0.47988,-1.2565,-1.28382,
        -1.4722,-1.09243,-1.04787,-0.22565,-0.83049,-1.56286,-1.58736,-1.27968,-1.47932,-1.53159,
        -1.49103,-1.57155,-2.2499,-1.45449,-1.49858,-1.42755,-0.20715,-0.59274,-1.81967,-1.40034,
        -1.56048,-1.03239,-1.03612)

c2 <- c(-0.21689,-0.27319,-1.0471,-0.11206,-1.06613,0.07381,-0.63246,-0.30377,-1.18031,-1.27296,
        -0.53273,0.42434,-0.11198,-1.67465,-0.38939,-0.3564,0.19951,-1.54482,-1.21379,-0.08038,
        0.6924,0.25698,-0.98285,-0.86814,-1.09519,-1.56709,-1.81347,-0.96873,0.35623,0.38108,
        0.19308,0.15162,-0.46344,-0.71079,-0.13648,-0.29701,0.09477,0.53213,-0.55163,-0.41416,
        -0.71855,-0.02012,0.09659,0.46013,0.48755,-0.9234,-0.77854,0.15582,-0.62922,-0.99408,
        -0.29674,-1.21026,-2.46508,-1.00195,-1.06141,-0.88773,1.33389,0.61958,-1.84193,-0.55586,
        -0.52414,-0.47264,-0.48429)

c3 <- c(1.51974,1.64389,1.90232,1.70962,1.54944,2.41102,2.09221,2.13021,1.69008,1.58491,
        2.16515,2.91955,2.90247,1.27887,1.40529,1.2542,2.39614,0.97705,1.67163,3.05184,
        2.64666,2.86166,1.31697,1.59058,1.57176,1.39777,0.00153,0.16629,1.60171,1.49101,
        1.89466,2.2059,1.23622,1.13569,1.18006,1.47382,2.16486,2.06854,0.49417,0.54856,
        0.34286,0.61828,0.57692,2.66983,2.48407,2.10358,2.00146,2.34459,1.70855,1.92133,
        2.11189,1.09009,0.32927,1.46992,1.52817,1.52252,2.7488,2.81182,1.31956,1.72246,
        1.92085,1.27339,1.82711)

Rstepvalues <- as.data.frame(rbind(c1,c2,c3))

names(Rstepvalues) <- c0

#Items that are negatively valenced. They are reverse-coded below.

negative <- c("SSAFPSAF63","SSAFPSAF65","SSAFPSAF67","SSAFPSAF68","SSAFPSAF69","SSAFPSAF71",
              "SSAFBUL74","SSAFBUL75","SSAFBUL76","SSAFBUL77B","SSAFBUL73","SSAFBUL83",
              "SSAFSUB88","SSAFSUB91","SSAFSUB92","SSAFSUB93","SSAFSUB94")

#Loop through the scale list
for (scalename in scalelist) {

#Stepvalues for the items of the current scale
stepvalues <- Rstepvalues[eval(as.name(scalename))]
items <- names(stepvalues)

#Only use the variables that belong to scales
tempdata <- indata[c0]

#Code negative values to missing
tempdata[tempdata < 0] <- NA

#Reverse code negatively valenced items
tempdata[negative] <- 5-tempdata[negative]

#Use only the variables of the current scale
tempdata <- tempdata[items]

#Recode values from the range of 1-4 to 0-3
tempdata <- tempdata[]-1

#The algorithm below is based on a Rasch partial credit model.

#See section 3.1 of the national benchmarking documentation referenced above for more details.

#Further reading: http://www.winsteps.com/a/Linacre-estimation-further-topics.pdf
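#In partial credit model notation: for a survey taker with ability t and an item with step
#values d1, d2, d3, the code below computes the category probabilities
#p_k = exp(k*t - d1 - ... - dk) / (1 + sum of exp(m*t - d1 - ... - dm) for m = 1, 2, 3),
#then e (the expected raw score) and v (its variance), and applies the damped
#Newton-Raphson update t1 = t + (r - e)/max(2*v, 1) until the change in t falls
#below 0.0000001 or 100 iterations are reached.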

tdata <- tempdata

tdata$counts = apply(tdata,1,function(x) sum(!is.na(x[])))
titems <- names(stepvalues)
tstepvalues <- stepvalues
tv1 <- paste("tv1", titems, sep="")
tv2 <- paste("tv2", titems, sep="")
tv3 <- paste("tv3", titems, sep="")
tdata[tv1] <- stepvalues[1,]
tdata[tv2] <- stepvalues[2,]
tdata[tv3] <- stepvalues[3,]
nm <- paste("nm", titems, sep="")
tdata[nm] <- tdata[titems]*0+1
tdata$anm <- rowSums(tdata[nm],na.rm=TRUE)
tdata[tv1] <- tdata[tv1]*tdata[nm]
tdata[tv2] <- tdata[tv2]*tdata[nm]
tdata[tv3] <- tdata[tv3]*tdata[nm]
tdata$rl <- tdata$anm*0
tdata$ru <- tdata$anm*3
tdata$ru[tdata$ru==0] <- 1
tdata$r <- rowSums(tdata[items],na.rm=TRUE)
tdata$r[tdata$r==tdata$rl] <- tdata$rl[tdata$r==tdata$rl]+0.3
tdata$r[tdata$r==tdata$ru] <- tdata$ru[tdata$r==tdata$ru]-0.3

d <- mean(colMeans(tstepvalues[],na.rm=TRUE))

tdata$t0 <- d + log((tdata$r-tdata$rl)/(tdata$ru-tdata$r))

tdata$t <- 0

tdata$t1 <- 1

iter <- 1

while(max(abs(tdata$t1-tdata$t),na.rm=TRUE)>0.0000001 & iter <=100) {

if (iter==1) {tdata$t <- tdata$t0

} else if (iter>1) {tdata$t <- tdata$t1}

iter <- iter+1

for (n in 1:ncol(tstepvalues)) {

jp <- paste("jp", titems[n], sep="")

j2p <- paste("j2p", titems[n], sep="")

dj <- paste("dj", titems[n], sep="")
tdata$p1 <- exp(1*tdata$t-tdata[,tv1[n]])/
  (1+exp(1*tdata$t-tdata[,tv1[n]])+exp(2*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]])+
   exp(3*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]]-tdata[,tv3[n]]))
tdata$p2 <- exp(2*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]])/
  (1+exp(1*tdata$t-tdata[,tv1[n]])+exp(2*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]])+
   exp(3*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]]-tdata[,tv3[n]]))
tdata$p3 <- exp(3*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]]-tdata[,tv3[n]])/
  (1+exp(1*tdata$t-tdata[,tv1[n]])+exp(2*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]])+
   exp(3*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]]-tdata[,tv3[n]]))
tdata$p1[is.na(tdata[,tv1[n]])] <- NA
tdata$p2[is.na(tdata[,tv1[n]])] <- NA
tdata$p3[is.na(tdata[,tv1[n]])] <- NA
tdata[jp] <- 1*tdata$p1+2*tdata$p2+3*tdata$p3
tdata[j2p] <- 1*1*tdata$p1+2*2*tdata$p2+3*3*tdata$p3
tdata[dj] <- tdata[j2p]-tdata[jp]*tdata[jp]

}

tdata$e <- rowSums(tdata[, grep("jp", names(tdata))],na.rm = TRUE)

tdata$v <- rowSums(tdata[, grep("dj", names(tdata))],na.rm = TRUE)

tdata$t1 <- tdata$t+(tdata$r- tdata$e)/pmax(2*tdata$v,1,na.rm = TRUE)

}

#Set to missing if the algorithm did not converge
tdata$t1[tdata$t1>30] <- NA

#Only calculate scale scores for those with valid responses to at least three items and more than half of the items

tdata$t1[tdata$counts<3 | tdata$counts<0.5*ncol(tempdata)] <- NA

##########SCALING FACTORS######################

#These factors are used to rescale the scale scores so that the cut scores are 300 and 400 for all scales.

#This is a linear transformation so it does not affect the relative distance between two scores: The distance is inflated by a constant factor B.

#See section 3.3 of the national benchmarking documentation referenced above for more details.

if (scalename == 'eng' ) {

A <- 325.859

B <- 40.557

}

if (scalename == 'clc' ) {

A <- 325.697

B <- 44.614

}

if (scalename == 'rel' ) {
A <- 322.89
B <- 36.517
}

if (scalename == 'par' ) {
A <- 332.549
B <- 44.683
}

if (scalename == 'saf' ) {
A <- 320.161
B <- 60.027
}

if (scalename == 'emo' ) {
A <- 315.079
B <- 39.663
}

if (scalename == 'psaf' ) {
A <- 328.929
B <- 59.402
}

if (scalename == 'bul' ) {
A <- 310.828
B <- 58.872
}

if (scalename == 'sub' ) {
A <- 338.85
B <- 119.933
}

if (scalename == 'env' ) {
A <- 326.967
B <- 40.341
}

if (scalename == 'penv' ) {
A <- 307.241
B <- 40.106
}

if (scalename == 'ins' ) {
A <- 344.491
B <- 38.992
}

if (scalename == 'men' ) {
A <- 318.995
B <- 41.928
}

if (scalename == 'dis' ) {
A <- 333.135
B <- 41.56
}

#Rescale the scores based on the factors above
tdata$tscore <- round2(tdata$t1*B+A,0)

#Keep only the calculated scale scores
score_data <- tdata[c("tscore")]

#Rename the data using the scale name and save it to the workspace

colnames(score_data) <- paste(scalename, colnames(score_data), sep = "_")

assign(scalename,score_data,envir = .GlobalEnv)

}

}

##########SPECIFICATION AND OUTPUT######################

#Read in data

indata <- read.table(file=sprintf("%s\\%s", folder, dataname),header=TRUE,sep=",")

names(indata) <- toupper(names(indata))

#Item names for each scale
eng <- c("SENGCLC1","SENGCLC2","SENGCLC3","SENGCLC4","SENGCLC7",
         "SENGREL9","SENGREL11","SENGREL12","SENGREL14","SENGREL153","SENGREL17","SENGREL20","SENGREL21","SENGREL29",
         "SENGPAR44","SENGPAR45","SENGPAR46","SENGPAR47","SENGPAR48")

clc <- c("SENGCLC1", "SENGCLC2", "SENGCLC3", "SENGCLC4", "SENGCLC7")

rel <- c("SENGREL9","SENGREL11","SENGREL12","SENGREL14","SENGREL153","SENGREL17","SENGREL20","SENGREL21","SENGREL29")

par <- c("SENGPAR44","SENGPAR45","SENGPAR46","SENGPAR47","SENGPAR48")

saf <- c("SSAFEMO49","SSAFEMO52","SSAFEMO53","SSAFEMO54","SSAFEMO56","SSAFEMO57",
         "SSAFPSAF60","SSAFPSAF63","SSAFPSAF65","SSAFPSAF67","SSAFPSAF68","SSAFPSAF69","SSAFPSAF71",
         "SSAFBUL74","SSAFBUL75","SSAFBUL76","SSAFBUL77B","SSAFBUL73","SSAFBUL83",
         "SSAFSUB88","SSAFSUB91","SSAFSUB92","SSAFSUB93","SSAFSUB94")

emo <- c("SSAFEMO49", "SSAFEMO52", "SSAFEMO53", "SSAFEMO54", "SSAFEMO56", "SSAFEMO57")

psaf <- c("SSAFPSAF60", "SSAFPSAF63", "SSAFPSAF65", "SSAFPSAF67", "SSAFPSAF68", "SSAFPSAF69", "SSAFPSAF71")

bul <- c("SSAFBUL74","SSAFBUL75","SSAFBUL76","SSAFBUL77B","SSAFBUL73","SSAFBUL83")

sub <- c("SSAFSUB88","SSAFSUB91","SSAFSUB92","SSAFSUB93","SSAFSUB94")

env <- c("SENVPENV100","SENVPENV102","SENVPENV105","SENVPENV106","SENVPENV107",
         "SENVINS111","SENVINS113","SENVINS114","SENVINS115","SENVINS121",
         "SENVMEN130","SENVMEN132","SENVMEN133","SENVMEN134","SENVMEN137",
         "SENVDIS142","SENVDIS143","SENVDIS146","SENVDIS147","SENVDIS147C")

penv <- c("SENVPENV100","SENVPENV102","SENVPENV105","SENVPENV106","SENVPENV107")

ins <- c("SENVINS111","SENVINS113","SENVINS114","SENVINS115","SENVINS121")

men <- c("SENVMEN130", "SENVMEN132", "SENVMEN133", "SENVMEN134", "SENVMEN137")

dis <- c("SENVDIS142","SENVDIS143","SENVDIS146","SENVDIS147","SENVDIS147C")

#Run scaling program for each scale
rasch(scalelist=c("eng","clc","rel","par","saf","emo","psaf","bul","sub","env","penv","ins","men","dis"))

#Attach the scale score data to the input data
out <- cbind(indata,eng,clc,rel,par,saf,emo,psaf,bul,sub,env,penv,ins,men,dis)

#Exclude cases if they did not provide any demographic information
for (v in c("eng","clc","rel","par","saf","emo","psaf","bul","sub","env","penv","ins","men","dis")) {

out[which((is.na(out$SDEMO148) | out$SDEMO148 == "" | out$SDEMO148 == -1) &
          (is.na(out$SDEMO149) | out$SDEMO149 == "" | out$SDEMO149 == -1) &
          (is.na(out$SDEMO150) | out$SDEMO150 == "" | out$SDEMO150 == -1) &
          (is.na(out$SDEMO151) | out$SDEMO151 == "" | out$SDEMO151 == -1)),paste(v,"_tscore",sep="")] <- NA

}

##########OUTPUT DATASET WITH SCALE SCORES###############

write.table(out, sprintf("%s\\scale_score_%s", folder, dataname), col.names=T,row.names=F,sep=",")

##########END OF PROGRAM#################################
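If your survey used a complex sample design, the scale scores written to the output file can be analyzed with your design specifications just like any other survey variable. The snippet below is a minimal sketch of one way to do this with the R survey package; the variable names WEIGHT, STRATUM, and SCHOOLID are placeholders for whatever weight, stratum, and cluster identifiers your own design uses.

#A minimal sketch (not part of the scoring program): design-based analysis of the
#output scale scores with the survey package. WEIGHT, STRATUM, and SCHOOLID are
#placeholder names; substitute the design variables from your own sample.
library(survey)

scored <- read.csv(sprintf("%s\\scale_score_%s", folder, dataname))

des <- svydesign(ids = ~SCHOOLID,     #cluster identifier (placeholder)
                 strata = ~STRATUM,   #stratum identifier (placeholder)
                 weights = ~WEIGHT,   #analysis weight (placeholder)
                 data = scored, nest = TRUE)

#Design-based mean and standard error of, for example, the engagement scale score
svymean(~eng_tscore, des, na.rm = TRUE)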

N O T E
For users wanting to preserve legacy trend lines: If you would like to generate scale scores as produced by the platform prior to the release of VM 3.0 in November 2017 (aka “legacy” scale scores), the EDSCLS Help Desk can send you the legacy R code. Please contact the Help Desk at edscls@air.org or (866) 730-6735.

##########INSTRUCTION###############################

#You only need to specify your data folder and csv dataset name in the "INPUTS" section below

#The output dataset will be produced in the folder you specify

#Comment: This program calculates scale scores for each survey taker. The calculated scale scores are just

#         additional analysis variables attached to your input dataset. If your survey has a complex sample

#         design (e.g., you took a sample from your target population with stratification, clustering, etc.),

#         you should apply your design specifications (strata, cluster, weights, etc.) as you would do when

#         analyzing other survey variables.

####################################################

##########INPUTS###############################

#Specify your data folder and csv dataset name
folder <- "FOLDER PATH"

dataname <- "instructional staff data.csv"

##########SCALING PROGRAM######################

#Option for decimal places
options(digits=10)

#Rounding function that rounds .5 up to the next integer
round2 = function(x, n) {
posneg = sign(x)
z = abs(x)*10^n
z = z + 0.5
z = trunc(z)
z = z/10^n
z*posneg
}

#Scale score calculation function
rasch <- function(scalelist=NULL){

#Stepvalues that are estimated from the national benchmarking. These are the values in Table A5 of the national benchmarking documentation

#(https://safesupportivelearning.ed.gov/sites/default/files/EDSCLS_Psychometric_Benchmarking_Technical_Report.pdf).

#These three sets of values are used to calculate the scale scores for any survey takers.

c0 <- c("IENGCLC2","IENGCLC3","IENGCLC4","IENGCLC6","IENGCLC7","IENGCLC8",
        "IENGREL9","IENGREL10","IENGREL12","IENGREL14","IENGREL15",
        "IENGPAR29","IENGPAR31","IENGPAR32","IENGPAR36","IENGPAR42","IENGPAR48",
        "ISAFEMO52","ISAFEMO53","ISAFEMO54","ISAFEMO55","ISAFEMO56","ISAFEMO58",
        "ISAFPSAF60","ISAFPSAF61","ISAFPSAF62","ISAFPSAF64","ISAFPSAF66","ISAFPSAF67",
        "ISAFBUL68","ISAFBUL69","ISAFBUL71","ISAFBUL73","ISAFBUL79","ISAFBUL80","ISAFBUL81","ISAFBUL82",
        "ISAFSUB86","ISAFSUB87","ISAFSUB88","ISAFSUB91",
        "IENVPENV97","IENVPENV98","IENVPENV100","IENVPENV101","IENVPENV102","IENVPENV103",
        "IENVINS105","IENVINS107","IENVINS108","IENVINS110","IENVINS115","IENVINS116",
        "IENVPHEA119","IENVPHEA120","IENVPHEA121","IENVPHEA122",
        "IENVMEN123","IENVMEN125","IENVMEN126","IENVMEN128","IENVMEN137",
        "IENVDIS129","IENVDIS130","IENVDIS134","IENVDIS134C","IENVDIS135","IENVDIS136")

c1 <- c(-2.23882,-2.1565,-2.61022,-2.72424,-1.78468,-2.18346,-3.02788,-3.22684,-4.12339,-2.32472,
        -2.97012,-1.64759,-1.47893,-1.83183,-1.11894,-2.28667,-2.70965,-1.42847,-0.92065,-0.92441,
        -1.62728,-1.32451,-2.51414,-1.06057,-1.90306,-1.86559,-3.0375,-2.14065,-0.55663,-0.98262,
        -0.77695,-2.52361,-2.61231,-2.28638,-2.09304,-2.17942,-2.01788,-2.00464,-2.09068,-2.06953,
        -2.41182,-2.00263,-2.09549,-1.75794,-1.55537,-1.61969,-1.31004,-1.14778,-1.54741,-2.46068,
        -2.87387,-3.11921,-2.09029,-2.57512,-2.25692,-3.02897,-2.43883,-2.07875,-2.19165,-2.21058,
        -2.17242,-1.99021,-2.30585,-2.91403,-1.13436,-1.39281,-1.23245,-2.30109)

c2 <- c(-0.79286,-1.23134,-0.48501,-1.52987,0.14887,-0.80117,-1.31162,-0.8068,-0.3855,0.08648,
        -1.66979,-0.18685,0.23174,0.95631,0.45601,0.09988,-1.61862,-1.18202,-0.08686,-0.10689,
        -0.51158,-1.37726,-1.22259,0.00142,-0.25214,-0.38786,-1.91849,-1.97713,0.62288,0.84634,
        1.43081,-0.48166,-0.79026,-1.89,-2.12925,-2.10602,-1.89899,0.08366,0.09402,-0.35578,
        0.22724,-1.31231,-1.38865,-0.64084,-1.02356,-0.74721,-0.46397,0.49282,0.38599,-1.08837,
        -1.38221,-2.31483,-1.01021,-0.17834,-0.28418,-0.80828,-0.39363,-1.12673,-0.13682,-0.19846,
        0.5032,-0.29119,-1.0817,-2.43138,0.16961,-0.02434,0.14534,-1.50784)

c3 <- c(1.63635,1.6155,3.37974,1.8249,2.95479,3.1319,2.68007,2.92047,3.8331,3.0916,
        2.16597,3.1684,3.2615,4.79256,3.49956,3.78357,1.54464,1.84011,2.70973,2.13721,
        1.98949,2.08055,2.14039,3.10718,2.78974,2.89179,1.35532,0.98397,2.86862,4.3873,
        4.28451,3.55645,2.1421,1.04757,1.02448,0.94933,0.96563,3.3934,3.48374,3.19388,
        3.39829,1.49996,1.69196,2.34516,2.3859,2.53191,2.63674,4.44998,4.13798,3.33738,
        2.09191,1.70813,2.27599,3.11342,3.19722,3.13805,3.23224,2.2716,3.24814,3.25992,
        3.93348,3.36141,2.16889,1.70719,2.46815,2.76214,2.74392,2.24373)

Rstepvalues <- as.data.frame(rbind(c1,c2,c3))
names(Rstepvalues) <- c0

#Items that are negatively valenced. They are reverse-coded below.

negative <- c("ISAFPSAF60","ISAFPSAF61","ISAFPSAF62","ISAFPSAF64","ISAFPSAF66","ISAFPSAF67",
              "ISAFBUL68","ISAFBUL69","ISAFBUL79","ISAFBUL80","ISAFBUL81","ISAFBUL82",
              "IENVPENV100","IENVPENV101","IENVPENV102","IENVPENV103")

#Loop through the scale list
for (scalename in scalelist) {

#Stepvalues for the items of the current scale
stepvalues <- Rstepvalues[eval(as.name(scalename))]
items <- names(stepvalues)

#Only use the variables that belong to scales
tempdata <- indata[c0]

#Code negative values to missing
tempdata[tempdata < 0] <- NA

#Reverse code negatively valenced items
tempdata[negative] <- 5-tempdata[negative]

#Use only the variables of the current scale
tempdata <- tempdata[items]

#Recode values from the range of 1-4 to 0-3
tempdata <- tempdata[]-1

#The algorithm below is based on a Rasch partial credit model.
#See section 3.1 of the national benchmarking documentation referenced above for more details.
#Further reading: http://www.winsteps.com/a/Linacre-estimation-further-topics.pdf

tdata <- tempdata

tdata$counts = apply(tdata,1,function(x) sum(!is.na(x[])))
titems <- names(stepvalues)
tstepvalues <- stepvalues
tv1 <- paste("tv1", titems, sep="")
tv2 <- paste("tv2", titems, sep="")
tv3 <- paste("tv3", titems, sep="")
tdata[tv1] <- stepvalues[1,]
tdata[tv2] <- stepvalues[2,]
tdata[tv3] <- stepvalues[3,]
nm <- paste("nm", titems, sep="")
tdata[nm] <- tdata[titems]*0+1
tdata$anm <- rowSums(tdata[nm],na.rm=TRUE)
tdata[tv1] <- tdata[tv1]*tdata[nm]
tdata[tv2] <- tdata[tv2]*tdata[nm]
tdata[tv3] <- tdata[tv3]*tdata[nm]
tdata$rl <- tdata$anm*0
tdata$ru <- tdata$anm*3
tdata$ru[tdata$ru==0] <- 1
tdata$r <- rowSums(tdata[items],na.rm=TRUE)
tdata$r[tdata$r==tdata$rl] <- tdata$rl[tdata$r==tdata$rl]+0.3
tdata$r[tdata$r==tdata$ru] <- tdata$ru[tdata$r==tdata$ru]-0.3
d <- mean(colMeans(tstepvalues[],na.rm=TRUE))
tdata$t0 <- d + log((tdata$r-tdata$rl)/(tdata$ru-tdata$r))
tdata$t <- 0

tdata$t1 <- 1

iter <- 1

while(max(abs(tdata$t1-tdata$t),na.rm=TRUE)>0.0000001 & iter <=100) {

if (iter==1) {tdata$t <- tdata$t0
} else if (iter>1) {tdata$t <- tdata$t1}
iter <- iter+1

for (n in 1:ncol(tstepvalues)) {

jp <- paste("jp", titems[n], sep="")

j2p <- paste("j2p", titems[n], sep="")

dj <- paste("dj", titems[n], sep="")
tdata$p1 <- exp(1*tdata$t-tdata[,tv1[n]])/
  (1+exp(1*tdata$t-tdata[,tv1[n]])+exp(2*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]])+
   exp(3*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]]-tdata[,tv3[n]]))
tdata$p2 <- exp(2*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]])/
  (1+exp(1*tdata$t-tdata[,tv1[n]])+exp(2*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]])+
   exp(3*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]]-tdata[,tv3[n]]))
tdata$p3 <- exp(3*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]]-tdata[,tv3[n]])/
  (1+exp(1*tdata$t-tdata[,tv1[n]])+exp(2*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]])+
   exp(3*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]]-tdata[,tv3[n]]))
tdata$p1[is.na(tdata[,tv1[n]])] <- NA
tdata$p2[is.na(tdata[,tv1[n]])] <- NA
tdata$p3[is.na(tdata[,tv1[n]])] <- NA
tdata[jp] <- 1*tdata$p1+2*tdata$p2+3*tdata$p3
tdata[j2p] <- 1*1*tdata$p1+2*2*tdata$p2+3*3*tdata$p3
tdata[dj] <- tdata[j2p]-tdata[jp]*tdata[jp]

}

tdata$e <- rowSums(tdata[, grep("jp", names(tdata))],na.rm = TRUE)

tdata$v <- rowSums(tdata[, grep("dj", names(tdata))],na.rm = TRUE)

tdata$t1 <- tdata$t+(tdata$r- tdata$e)/pmax(2*tdata$v,1,na.rm = TRUE)

}

#Set to missing if the algorithm did not converge
tdata$t1[tdata$t1>30] <- NA

#Only calculate scale scores for those with valid responses to at least three items and more than half of the items

tdata$t1[tdata$counts<3 | tdata$counts<0.5*ncol(tempdata)] <- NA

##########SCALING FACTORS######################

#These factors are used to rescale the scale scores so that the cut scores are 300 and 400 for all scales.

#This is a linear transformation so it does not affect the relative distance between two scores: The distance is inflated by a constant factor B.

#See section 3.3 of the national benchmarking documentation referenced above for more details.

if (scalename == 'eng' ) {
A <- 316.291

B <- 29.053

}

if (scalename == 'clc' ) {
A <- 325.048
B <- 31.112
}

if (scalename == 'rel' ) {
A <- 322.095
B <- 26.592
}

if (scalename == 'par' ) {
A <- 298.823
B <- 30.178
}

if (scalename == 'saf' ) {
A <- 323.415
B <- 31.995
}

if (scalename == 'emo' ) {
A <- 327.561
B <- 33.799
}

if (scalename == 'psaf' ) {
A <- 324.906
B <- 32.092
}

if (scalename == 'bul' ) {
A <- 338.35
B <- 30.829
}

if (scalename == 'sub' ) {
A <- 299.599
B <- 29.819
}

if (scalename == 'env' ) {
A <- 320.62
B <- 29.264
}

if (scalename == 'penv' ) {
A <- 330.34
B <- 31.939
}

if (scalename == 'ins' ) {
A <- 324.968
B <- 25.403
}

if (scalename == 'phea') {
A <- 311.666
B <- 27.864
}

if (scalename == 'men' ) {
A <- 307.097
B <- 28.889
}

if (scalename == 'dis' ) {
A <- 328.255
B <- 30.631
}

#Rescale the scores based on the factors above
tdata$tscore <- round2(tdata$t1*B+A,0)

#Keep only the calculated scale scores
score_data <- tdata[c("tscore")]

#Rename the data using the scale name and save it to the workspace
colnames(score_data) <- paste(scalename, colnames(score_data), sep = "_")

assign(scalename,score_data,envir = .GlobalEnv)

}

}

##########SPECIFICATION AND OUTPUT######################

#Read in data

indata <- read.table(file=sprintf("%s\\%s", folder, dataname),header=TRUE,sep=",")

names(indata) <- toupper(names(indata))

#Item names for each scale
eng <- c("IENGCLC2","IENGCLC3","IENGCLC4","IENGCLC6","IENGCLC7","IENGCLC8",
         "IENGREL9","IENGREL10","IENGREL12","IENGREL14","IENGREL15",
         "IENGPAR29","IENGPAR31","IENGPAR32","IENGPAR36","IENGPAR42","IENGPAR48")

clc <- c("IENGCLC2","IENGCLC3","IENGCLC4","IENGCLC6","IENGCLC7","IENGCLC8")

rel <- c("IENGREL9","IENGREL10","IENGREL12","IENGREL14","IENGREL15")

par <- c("IENGPAR29","IENGPAR31","IENGPAR32","IENGPAR36","IENGPAR42","IENGPAR48")

saf <- c("ISAFEMO52","ISAFEMO53","ISAFEMO54","ISAFEMO55","ISAFEMO56","ISAFEMO58",
         "ISAFPSAF60","ISAFPSAF61","ISAFPSAF62","ISAFPSAF64","ISAFPSAF66","ISAFPSAF67",
         "ISAFBUL68","ISAFBUL69","ISAFBUL71","ISAFBUL73","ISAFBUL79","ISAFBUL80","ISAFBUL81","ISAFBUL82",
         "ISAFSUB86","ISAFSUB87","ISAFSUB88","ISAFSUB91")

emo <- c("ISAFEMO52","ISAFEMO53","ISAFEMO54","ISAFEMO55","ISAFEMO56","ISAFEMO58")

psaf <- c("ISAFPSAF60","ISAFPSAF61","ISAFPSAF62","ISAFPSAF64","ISAFPSAF66","ISAFPSAF67")

bul <- c("ISAFBUL68","ISAFBUL69","ISAFBUL71","ISAFBUL73","ISAFBUL79","ISAFBUL80","ISAFBUL81","ISAFBUL82")

sub <- c("ISAFSUB86","ISAFSUB87","ISAFSUB88","ISAFSUB91")

env <- c("IENVPENV97","IENVPENV98","IENVPENV100","IENVPENV101","IENVPENV102","IENVPENV103",
         "IENVINS105","IENVINS107","IENVINS108","IENVINS110","IENVINS115","IENVINS116",
         "IENVPHEA119","IENVPHEA120","IENVPHEA121","IENVPHEA122",
         "IENVMEN123","IENVMEN125","IENVMEN126","IENVMEN128","IENVMEN137",
         "IENVDIS129","IENVDIS130","IENVDIS134","IENVDIS134C","IENVDIS135","IENVDIS136")

penv <- c("IENVPENV97","IENVPENV98","IENVPENV100","IENVPENV101","IENVPENV102","IENVPENV103")

ins <- c("IENVINS105","IENVINS107","IENVINS108","IENVINS110","IENVINS115","IENVINS116")

phea <- c("IENVPHEA119","IENVPHEA120","IENVPHEA121","IENVPHEA122")

men <- c("IENVMEN123","IENVMEN125","IENVMEN126","IENVMEN128","IENVMEN137")

dis <- c("IENVDIS129","IENVDIS130","IENVDIS134","IENVDIS134C","IENVDIS135","IENVDIS136")

#Run scaling program for each scale
rasch(scalelist=c("eng","clc","rel","par","saf","emo","psaf","bul","sub","env","penv","ins","phea","men","dis"))

#Attach the scale score data to the input data
out <- cbind(indata,eng,clc,rel,par,saf,emo,psaf,bul,sub,env,penv,ins,phea,men,dis)

#Exclude cases if they did not provide any demographic information
for (v in c("eng","clc","rel","par","saf","emo","psaf","bul","sub","env","penv","ins","phea","men","dis")) {

out[which((is.na(out$IDEMO138) | out$IDEMO138 == "" | out$IDEMO138 == -1) &
          (is.na(out$IDEMO139) | out$IDEMO139 == "" | out$IDEMO139 == -1) &
          (is.na(out$IDEMO140) | out$IDEMO140 == "" | out$IDEMO140 == -1)),paste(v,"_tscore",sep="")] <- NA

}

##########OUTPUT DATASET WITH SCALE SCORES###############

write.table(out, sprintf("%s\\scale_score_%s", folder, dataname), col.names=T,row.names=F,sep=",")

##########END OF PROGRAM#################################

N O T E
For users wanting to preserve legacy trend lines: If you would like to generate scale scores as produced by the platform prior to the release of VM 3.0 in November 2017 (aka “legacy” scale scores), the EDSCLS Help Desk can send you the legacy R code. Please contact the Help Desk at edscls@air.org or (866) 730-6735.

##########INSTRUCTION###############################

#You only need to specify your data folder and csv dataset name in the "INPUTS" section below

#The output dataset will be produced in the folder you specify

#Comment: This program calculates scale scores for each survey taker. The calculated scale scores are just

#         additional analysis variables attached to your input dataset. If your survey has a complex sample

#         design (e.g., you took a sample from your target population with stratification, clustering, etc.),

#         you should apply your design specifications (strata, cluster, weights, etc.) as you would do when

#         analyzing other survey variables.

####################################################

##########INPUTS###############################

#Specify your data folder and csv dataset name
folder <- "FOLDER PATH"

dataname <- "noninstructional staff data.csv"

##########SCALING PROGRAM######################

#Option for decimal places
options(digits=10)

#Rounding function that rounds .5 up to the next integer
round2 = function(x, n) {
posneg = sign(x)
z = abs(x)*10^n
z = z + 0.5
z = trunc(z)
z = z/10^n
z*posneg
}

#Scale score calculation function
rasch <- function(scalelist=NULL){

#Stepvalues that are estimated from the national benchmarking. These are the values in Table A6 of the national benchmarking documentation

#(https://safesupportivelearning.ed.gov/sites/default/files/EDSCLS_Psychometric_Benchmarking_Technical_Report.pdf).

#These three sets of values are used to calculate the scale scores for any survey takers.

c0 <- c("NENGCLC2","NENGCLC3","NENGCLC4","NENGCLC6","NENGCLC7","NENGCLC8",
        "NENGREL16","NENGREL17","NENGREL18","NENGREL24","NENGREL25","NENGREL30",
        "NENGPAR34","NENGPAR37","NENGPAR38","NENGPAR44","NENGPAR47",
        "NSAFEMO51","NSAFEMO52","NSAFEMO53","NSAFEMO54","NSAFEMO55","NSAFEMO148",
        "NSAFPSAF57","NSAFPSAF58","NSAFPSAF59","NSAFPSAF61","NSAFPSAF63","NSAFPSAF64",
        "NSAFBUL65","NSAFBUL66","NSAFBUL70","NSAFBUL76","NSAFBUL77","NSAFBUL78","NSAFBUL79",
        "NSAFSUB83","NSAFSUB84","NSAFSUB85","NSAFSUB87","NSAFSUB88",
        "NENVPENV97","NENVPENV98","NENVPENV99","NENVPENV100","NENVPENV102","NENVPENV103",
        "NENVINS109","NENVINS110","NENVINS111","NENVINS140",
        "NENVPHEA115","NENVPHEA117","NENVPHEA118","NENVPHEA119",
        "NENVMEN122","NENVMEN125","NENVMEN126","NENVMEN127",
        "NENVDIS130","NENVDIS131","NENVDIS132","NENVDIS134","NENVDIS134C","NENVDIS135","NENVDIS136","NENVDIS137")

c1 <- c(-2.41269,-2.90456,-2.54538,-2.44946,-2.37193,-2.71602,-2.51626,-3.51999,-2.47435,-3.24542,
        -2.48313,-3.09321,-1.81246,-2.66216,-2.32825,-3.02065,-3.21406,-1.0321,-0.96298,-0.91493,
        -0.99467,-1.74572,-2.7771,-0.90158,-2.04704,-1.87806,-3.11085,-1.92372,-0.52093,-1.08067,
        -0.96739,-1.99696,-1.42134,-1.39773,-1.99819,-0.85593,-1.4652,-1.7502,-1.95507,-1.37448,
        -1.48793,-1.31365,-2.04206,-1.80633,-1.83406,-1.66769,-2.25032,-2.80502,-3.03108,-1.45675,
        -3.53932,-2.20742,-2.31603,-2.86615,-2.70978,-1.49341,-2.46824,-1.7135,-2.22951,-2.26218,
        -2.98917,-2.50782,-0.79484,-0.72395,-3.00002,-0.93667,-1.65967)

c2 <- c(-0.62783,-0.93949,-0.35921,-1.91379,-0.49578,-0.90214,-1.33325,-0.57221,-0.64719,-1.08817,
        -0.03373,-0.94521,0.2645,0.20785,0.96649,-0.16654,-1.66755,-1.26082,-0.37968,-0.43057,
        -0.8671,-1.55043,-0.80514,0.26985,-0.5574,-0.67698,-1.76912,-1.89623,0.55635,0.66543,
        1.10514,-0.93873,-2.01817,-2.26507,-2.08219,-2.28479,-0.30073,-0.21784,-0.61555,-0.59265,
        -0.14093,-0.75334,-1.53466,-1.47464,-0.83077,-1.64181,-1.1453,-1.10456,-0.14643,-0.85192,
        -1.21007,-0.51886,-0.76917,-1.32258,-0.54017,-0.79424,0.31508,-0.05805,-0.99588,-1.08258,
        -2.15103,-1.84725,0.24373,-0.00727,-0.59234,-0.10022,-1.39865)

c3 <- c(2.01573,2.21546,3.94359,2.20042,3.0729,3.1253,3.44854,3.47261,3.50871,2.71541,
        3.31846,3.72407,3.79969,3.80338,5.37724,4.01239,2.26929,1.70307,2.01959,1.7704,
        1.72993,2.12222,2.22456,3.05953,2.66121,2.70934,1.21185,1.02633,2.64525,4.41386,
        3.90358,2.38629,1.17343,1.03809,0.92795,0.98601,2.99407,3.24186,3.06233,3.20019,
        3.40319,2.46686,2.51556,2.54883,2.84167,2.19677,2.43437,2.80337,3.53325,2.6866,
        2.43721,3.27185,2.94548,3.29168,3.54043,3.55629,4.24422,4.06839,2.78672,2.67435,
        2.0923,2.35542,2.72603,3.01571,3.61033,3.33175,2.96215)

Rstepvalues <- as.data.frame(rbind(c1,c2,c3))
names(Rstepvalues) <- c0

#Items that are negatively valenced. They are reverse-coded below.

negative <- c("NSAFPSAF57","NSAFPSAF58","NSAFPSAF59","NSAFPSAF61","NSAFPSAF63","NSAFPSAF64",
              "NSAFBUL65","NSAFBUL66","NSAFBUL76","NSAFBUL77","NSAFBUL78","NSAFBUL79",
              "NENVPENV97","NENVPENV98","NENVPENV99","NENVPENV100")

#Loop through the scale list
for (scalename in scalelist) {

#Stepvalues for the items of the current scale
stepvalues <- Rstepvalues[eval(as.name(scalename))]
items <- names(stepvalues)

#Only use the variables that belong to scales
tempdata <- indata[c0]

#Code negative values to missing
tempdata[tempdata < 0] <- NA

#Reverse code negatively valenced items
tempdata[negative] <- 5-tempdata[negative]

#Use only the variables of the current scale
tempdata <- tempdata[items]

#Recode values from the range of 1-4 to 0-3
tempdata <- tempdata[]-1

#The algorithm below is based on a Rasch partial credit model.

#See section 3.1 of the national benchmarking documentation referenced above for more details.

#Further reading: http://www.winsteps.com/a/Linacre-estimation-further-topics.pdf

tdata <- tempdata

tdata$counts = apply(tdata,1,function(x) sum(!is.na(x[])))
titems <- names(stepvalues)
tstepvalues <- stepvalues
tv1 <- paste("tv1", titems, sep="")
tv2 <- paste("tv2", titems, sep="")
tv3 <- paste("tv3", titems, sep="")
tdata[tv1] <- stepvalues[1,]
tdata[tv2] <- stepvalues[2,]
tdata[tv3] <- stepvalues[3,]
nm <- paste("nm", titems, sep="")
tdata[nm] <- tdata[titems]*0+1
tdata$anm <- rowSums(tdata[nm],na.rm=TRUE)
tdata[tv1] <- tdata[tv1]*tdata[nm]
tdata[tv2] <- tdata[tv2]*tdata[nm]
tdata[tv3] <- tdata[tv3]*tdata[nm]
tdata$rl <- tdata$anm*0
tdata$ru <- tdata$anm*3
tdata$ru[tdata$ru==0] <- 1
tdata$r <- rowSums(tdata[items],na.rm=TRUE)
tdata$r[tdata$r==tdata$rl] <- tdata$rl[tdata$r==tdata$rl]+0.3
tdata$r[tdata$r==tdata$ru] <- tdata$ru[tdata$r==tdata$ru]-0.3
d <- mean(colMeans(tstepvalues[],na.rm=TRUE))
tdata$t0 <- d + log((tdata$r-tdata$rl)/(tdata$ru-tdata$r))
tdata$t <- 0

tdata$t1 <- 1

iter <- 1

while(max(abs(tdata$t1-tdata$t),na.rm=TRUE)>0.0000001 & iter <=100) {

if (iter==1) {tdata$t <- tdata$t0
} else if (iter>1) {tdata$t <- tdata$t1}
iter <- iter+1

for (n in 1:ncol(tstepvalues)) {

jp <- paste("jp", titems[n], sep="")

j2p <- paste("j2p", titems[n], sep="")

dj <- paste("dj", titems[n], sep="")

tdata$p1 <- exp(1*tdata$t-tdata[,tv1[n]])/
  (1+exp(1*tdata$t-tdata[,tv1[n]])+exp(2*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]])+
   exp(3*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]]-tdata[,tv3[n]]))
tdata$p2 <- exp(2*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]])/
  (1+exp(1*tdata$t-tdata[,tv1[n]])+exp(2*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]])+
   exp(3*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]]-tdata[,tv3[n]]))
tdata$p3 <- exp(3*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]]-tdata[,tv3[n]])/
  (1+exp(1*tdata$t-tdata[,tv1[n]])+exp(2*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]])+
   exp(3*tdata$t-tdata[,tv1[n]]-tdata[,tv2[n]]-tdata[,tv3[n]]))
tdata$p1[is.na(tdata[,tv1[n]])] <- NA
tdata$p2[is.na(tdata[,tv1[n]])] <- NA
tdata$p3[is.na(tdata[,tv1[n]])] <- NA
tdata[jp] <- 1*tdata$p1+2*tdata$p2+3*tdata$p3
tdata[j2p] <- 1*1*tdata$p1+2*2*tdata$p2+3*3*tdata$p3
tdata[dj] <- tdata[j2p]-tdata[jp]*tdata[jp]

}

tdata$e <- rowSums(tdata[, grep("jp", names(tdata))],na.rm = TRUE)

tdata$v <- rowSums(tdata[, grep("dj", names(tdata))],na.rm = TRUE)

tdata$t1 <- tdata$t+(tdata$r- tdata$e)/pmax(2*tdata$v,1,na.rm = TRUE)

}

#Set to missing if the algorithm did not converge
tdata$t1[tdata$t1>30] <- NA

#Only calculate scale scores for those with valid responses to at least three items and more than half of the items

tdata$t1[tdata$counts<3 | tdata$counts<0.5*ncol(tempdata)] <- NA

##########SCALING FACTORS######################

#These factors are used to rescale the scale scores so that the cut scores are 300 and 400 for all scales.

#This is a linear transformation so it does not affect the relative distance between two scores: The distance is inflated by a constant factor B.

#See section 3.3 of the national benchmarking documentation referenced above for more details. (A short example of summarizing scores against these cut scores appears after the end of this program.)

if (scalename == 'eng' ) {
A <- 316.027

B <- 25.661

}

if (scalename == 'clc' ) {
A <- 324.047
B <- 27.686
}

if (scalename == 'rel' ) {
A <- 318.803
B <- 24.114
}

if (scalename == 'par' ) {
A <- 300.916
B <- 25.772
}

if (scalename == 'saf' ) {
A <- 328.465
B <- 31.578
}

if (scalename == 'emo' ) {
A <- 332.304
B <- 35.115
}

if (scalename == 'psaf' ) {
A <- 327.846
B <- 32.555
}

if (scalename == 'bul' ) {
A <- 342.271
B <- 32.014
}

if (scalename == 'sub' ) {
A <- 310.583
B <- 28.119
}

if (scalename == 'env' ) {
A <- 323.636
B <- 25.919
}

if (scalename == 'penv' ) {
A <- 333.383
B <- 26.649
}

if (scalename == 'ins' ) {
A <- 322.626
B <- 27.086
}

if (scalename == 'phea') {
A <- 319.552
B <- 24.661
}

if (scalename == 'men' ) {
A <- 309.507
B <- 24.669
}

if (scalename == 'dis' ) {
A <- 326.977
B <- 25.694
}

#Rescale the scores based on the factors above
tdata$tscore <- round2(tdata$t1*B+A,0)

#Keep only the calculated scale scores
score_data <- tdata[c("tscore")]

#Rename the data using the scale name and save it to the workspace

colnames(score_data) <- paste(scalename, colnames(score_data), sep = "_")

assign(scalename,score_data,envir = .GlobalEnv)

}

}

##########SPECIFICATION AND OUTPUT######################

#Read in data

indata <- read.table(file=sprintf("%s\\%s", folder, dataname),header=TRUE,sep=",")

names(indata) <- toupper(names(indata))

#Item names for each scale
eng <- c("NENGCLC2","NENGCLC3","NENGCLC4","NENGCLC6","NENGCLC7","NENGCLC8",
         "NENGREL16","NENGREL17","NENGREL18","NENGREL24","NENGREL25","NENGREL30",
         "NENGPAR34","NENGPAR37","NENGPAR38","NENGPAR44","NENGPAR47")

clc <- c("NENGCLC2","NENGCLC3","NENGCLC4","NENGCLC6","NENGCLC7","NENGCLC8")

rel <- c("NENGREL16","NENGREL17","NENGREL18","NENGREL24","NENGREL25","NENGREL30")

par <- c("NENGPAR34","NENGPAR37","NENGPAR38","NENGPAR44","NENGPAR47")

saf <- c("NSAFEMO51","NSAFEMO52","NSAFEMO53","NSAFEMO54","NSAFEMO55","NSAFEMO148",
         "NSAFPSAF57","NSAFPSAF58","NSAFPSAF59","NSAFPSAF61","NSAFPSAF63","NSAFPSAF64",
         "NSAFBUL65","NSAFBUL66","NSAFBUL70","NSAFBUL76","NSAFBUL77","NSAFBUL78","NSAFBUL79",
         "NSAFSUB83","NSAFSUB84","NSAFSUB85","NSAFSUB87","NSAFSUB88")

emo <- c("NSAFEMO51","NSAFEMO52","NSAFEMO53","NSAFEMO54","NSAFEMO55","NSAFEMO148")

psaf <- c("NSAFPSAF57","NSAFPSAF58","NSAFPSAF59","NSAFPSAF61","NSAFPSAF63","NSAFPSAF64")

bul <- c("NSAFBUL65","NSAFBUL66","NSAFBUL70","NSAFBUL76","NSAFBUL77","NSAFBUL78","NSAFBUL79")

sub <- c("NSAFSUB83","NSAFSUB84","NSAFSUB85","NSAFSUB87","NSAFSUB88")

env <- c("NENVPENV97","NENVPENV98","NENVPENV99","NENVPENV100","NENVPENV102","NENVPENV103",
         "NENVINS109","NENVINS110","NENVINS111","NENVINS140",
         "NENVPHEA115","NENVPHEA117","NENVPHEA118","NENVPHEA119",
         "NENVMEN122","NENVMEN125","NENVMEN126","NENVMEN127",
         "NENVDIS130","NENVDIS131","NENVDIS132","NENVDIS134","NENVDIS134C","NENVDIS135","NENVDIS136","NENVDIS137")

penv <- c("NENVPENV97","NENVPENV98","NENVPENV99","NENVPENV100","NENVPENV102","NENVPENV103")

ins <- c("NENVINS109","NENVINS110","NENVINS111","NENVINS140")

phea <- c("NENVPHEA115","NENVPHEA117","NENVPHEA118","NENVPHEA119")

men <- c("NENVMEN122","NENVMEN125","NENVMEN126","NENVMEN127")

dis <- c("NENVDIS130","NENVDIS131","NENVDIS132","NENVDIS134","NENVDIS134C","NENVDIS135","NENVDIS136","NENVDIS137")

#Run scaling program for each scale
rasch(scalelist=c("eng","clc","rel","par","saf","emo","psaf","bul","sub","env","penv","ins","phea","men","dis"))

#Attach the scale score data to the input data
out <- cbind(indata,eng,clc,rel,par,saf,emo,psaf,bul,sub,env,penv,ins,phea,men,dis)

#Exclude cases if they did not provide any demographic information
for (v in c("eng","clc","rel","par","saf","emo","psaf","bul","sub","env","penv","ins","phea","men","dis")) {

out[which((is.na(out$NDEMO142) | out$NDEMO142 == "" | out$NDEMO142 == -1) &
          (is.na(out$NDEMO143) | out$NDEMO143 == "" | out$NDEMO143 == -1) &
          (is.na(out$NDEMO144) | out$NDEMO144 == "" | out$NDEMO144 == -1)),paste(v,"_tscore",sep="")] <- NA

}

##########OUTPUT DATASET WITH SCALE SCORES###############

write.table(out, sprintf("%s\\scale_score_%s", folder, dataname), col.names=T,row.names=F,sep=",")

##########END OF PROGRAM#################################
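Because each program rescales its scores so that the two cut scores fall at 300 and 400, a quick way to summarize any of the three output files is to count how many respondents score below 300, from 300 to 400, and above 400 on each scale. The snippet below is an illustrative sketch only: it reads an output file produced above, loops over the scale name prefixes used in these programs, and skips any scale that is not present in that file. How the score ranges are interpreted (and how the boundary values themselves are classified) is described in the benchmarking documentation cited in the code comments.

#A minimal sketch (not part of the scoring programs): tabulating scale scores against
#the 300 and 400 cut scores for one of the output files produced above.
scored <- read.csv(sprintf("%s\\scale_score_%s", folder, dataname))

scales <- c("eng","clc","rel","par","saf","emo","psaf","bul","sub","env","penv","ins","phea","men","dis")

for (s in scales) {
  v <- scored[[paste(s, "_tscore", sep="")]]
  if (is.null(v)) next                    #skip scales not scored in this file
  cat(s, ":", sum(v < 300, na.rm=TRUE), "below 300,",
      sum(v >= 300 & v <= 400, na.rm=TRUE), "from 300 to 400,",
      sum(v > 400, na.rm=TRUE), "above 400", "\n")
}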
