First, add some R packages to the workspace.
Caution: warning messages are suppressed to reduce clutter in the output.
tidyverse: Importing data, cleaning data, data manipulation, & data visualization
kableExtra: Build HTML tables
DataExplorer: Exploratory Data Analysis & Feature Engineering
tableone: Standardized mean differences for before and after matching
survey: Matched data with match weights
Matching: Propensity score matching
cobalt: Covariate balance
reshape2: Covariate balance plot
rbounds: Rosenbaum Sensitivity test
library(tidyverse)
library(kableExtra)
library(DataExplorer)
library(tableone)
library(survey)
library(Matching)
library(cobalt)
library(reshape2)
library(rbounds)
select <- dplyr::select # Resolves package conflicts with select
options(width = 120) # Format print width
General functions used throughout the analysis.
# Update palN for Chem 24 Spring 2019 ------------------------------------------
update.chem24s19 <- function(chem.dat) {
PAL.course.data <- read_rds("palCourseData.rds")
chem24.S19 <- PAL.course.data %>%
filter(term == "Spring 2019", course == "CHEM 24")
# Add a palN indicator for Chem 24 Spring 2019
chem24.S19 <- chem24.S19 %>%
mutate(palN.chem24.S19 = case_when(
pal.grade == "CR" ~ 2,
is.na(pal.grade) ~ 0,
TRUE ~ 1
)) %>%
select(emplid, palN.chem24.S19)
# Check how many students are non-PAL, incomplete PAL, and PAL
table(chem24.S19$palN.chem24.S19)
# 0 1 2
# 51 10 52
chem.dat <- left_join(chem.dat, chem24.S19, by= "emplid" )
chem.dat <- chem.dat %>%
mutate(palN = case_when(
course == "CHEM 24" & term == "Spring 2019" ~ palN.chem24.S19,
TRUE ~ palN
)) %>%
select(-palN.chem24.S19)
return(chem.dat)
}
# Get raw table of mean gpa for PAL and non-PAL -------------------------------
get.raw.tab <- function(classes, df)
{
raw.table = data.frame(class=character(),
nonPALavg=numeric(),
PALavg=numeric(),
Diff=numeric(),
NonPAL_Num= integer(),
PAL_Num=integer(),
CompletePAL=numeric(),
TermPALStart=integer(),
row.names=NULL,
stringsAsFactors = FALSE)
for (i in 1:length(classes))
{
curr.class = classes[i]
temp = subset(df, course==curr.class & course.seq==0)
pal.start=min(unique(temp$term.code[temp$palN==2]))
# only include terms after PAL start term
temp = subset(temp, term.code>= pal.start)
x=tapply(temp$grd.pt.unt,temp$palN,
mean, na.rm=T) %>%
as.numeric %>%
round(2)
y=table(temp$palN) %>% as.numeric
raw.table[i, 'class' ] = curr.class
raw.table[i, c(2:4,7)]=c(x[1], x[3],x[3]-x[1],
round(y[3]/sum(y),2))
raw.table[i, c(5,6,8)]= c(y[1], y[3], pal.start)
}
# formatted table
raw.table <- kable(raw.table, caption = "Raw Comparison of PAL and non-PAL Grades (No Propensity Adjustment)") %>%
kable_styling(full_width= T, position = "left")
return(raw.table)
}
# Data cleaning ----------------------------------------------------------------
clean.data <- function(df)
{
# Replaced coh.term with coh.term.course
yr.course.taken = as.numeric(gsub(".*([0-9]{4})","\\1",df$coh.term.course))
df$delay.from.hs = ifelse(!is.na(yr.course.taken) & !is.na(df$hs.grad.yr),
yr.course.taken-df$hs.grad.yr, NA)
sum(is.na(df$delay.from.hs))
# remove students who did not complete PAL
df=subset(df, palN!=1)
#recode palN to factor with 0/1 levels
df$palN = ifelse(df$palN==2, 1, 0)
#clean up category names in m.rmd and e.rmd
df$m.rmd[df$m.rmd=="Not Remedial\nin Math"]="Not Remedial in Math"
df$m.rmd[df$m.rmd=="Remedial\nin Math"]="Remedial in Math"
df$e.rmd[df$e.rmd=="Not Remedial\nin English"]="Not Remedial in English"
df$e.rmd[df$e.rmd=="Remedial\nin English"]="Remedial in English"
df <- df %>% mutate(m.rmd = factor(m.rmd), e.rmd = factor(e.rmd))
# table(df$e.rmd)
# Create feature, proportion of cumulative units taken that were passes
# To distinguish the students who have taken 0 units from the students who
# have passed 0 units they have taken, students who have taken 0 units are
# labeled as -1. Then the -1 is replaced by the mean of cum.percent.units.passed
df <- df %>%
mutate(cum.percent.units.passed = ifelse(tot.taken.prgrss.start == 0, -1,
tot.passd.prgrss.start / tot.taken.prgrss.start)) %>%
mutate(cum.percent.units.passed = ifelse(cum.percent.units.passed == -1, mean(cum.percent.units.passed, na.rm =TRUE),
cum.percent.units.passed ))
# code instructor as alphabetic letter for anonymity
df$Instructor_01=droplevels(factor(df$Instructor_01))
instructor.vec = sort(unique(df$Instructor_01))
num.instr = length(instructor.vec)
df$Instructor_01 = factor(
df$Instructor_01, levels = instructor.vec, labels=as.character(1:num.instr)
)
key.instr.code = cbind(as.character(instructor.vec), 1:num.instr)
# Add "cMaj", census majors without concentrations/specializations/tracks/etc.
major_lookup <- read.csv("Census Major Lookup.csv", header = TRUE,
stringsAsFactors = FALSE)
df <- merge(df, major_lookup %>% select(censusMajor, cMaj),
by = "censusMajor", all.x = TRUE)
# Recode mother's education and father's education variables.
non.hs.grad= c("No High School","Some High School")
hs.grad= c("High School Graduate","Some College","2-Year College Graduate")
coll.grad= c("4-Year College Graduate","Postgraduate")
parent.ed.levels= c(
"Non-HS Graduate","HS Graduate", "College Graduate", "Unknown"
)
df <- df %>%
mutate(
mother.ed = ifelse(mother.ed %in% non.hs.grad, "Non-HS Graduate",
ifelse(mother.ed %in% hs.grad, "HS Graduate",
ifelse(mother.ed %in% coll.grad, "College Graduate", "Unknown"))),
mother.ed= factor(mother.ed, levels= parent.ed.levels),
father.ed = ifelse(father.ed %in% non.hs.grad,"Non-HS Graduate",
ifelse(father.ed %in% hs.grad, "HS Graduate",
ifelse(father.ed %in% coll.grad, "College Graduate", "Unknown"))),
father.ed= factor(father.ed, levels= parent.ed.levels))
# Recoded adm.area with these counties as local: 'El Dorado', 'Nevada',
# 'Placer', 'Sacramento', 'San Joaquin', 'Solano', 'Yolo'.
counties.rad <- read_csv(
"countiesRadius120mi.csv",
col_types = cols(
state = col_skip(), city = col_skip(), distance.km = col_skip()
)
)
df <- left_join(df, counties.rad, by = "zip")
local.adm.counties <- c(
'El Dorado', 'Nevada', 'Placer', 'Sacramento', 'San Joaquin', 'Solano',
'Yolo'
)
# County will be NA if the zip code is not within 120 mile radius of
# CSUS zip code(95819)
df <- df %>%
mutate(
adm.area =
if_else(!(county %in% local.adm.counties) | is.na(county),
"nonlocal", "local")
) %>%
mutate(sac.county.flg =
if_else(!(county == "Sacramento") | is.na(county), 0, 1)
) %>%
mutate(sac.county.flg = factor(sac.county.flg))
return(df)
}
# Extract prerequisite course grade ---------------------------------------------
get.prereq.grades <- function(course.df, df, prereq) {
# Get each student's most recent grade in the prerequisite course
course.stu <- course.df$emplid
prereq.df <- df %>%
select(emplid, course, course.seq, grd.pt.unt, grade) %>%
filter(emplid %in% course.stu, course== prereq) %>%
group_by(emplid) %>%
filter(course.seq == max(course.seq)) %>%
rename(
prereq.course.seq = course.seq, prereq.grd.pt.unt = grd.pt.unt,
prereq.grade = grade
) %>%
select(-course)
dim(prereq.df) # [1] 275 6
prereq.stu <- prereq.df$emplid
course.df <- course.df %>%
filter(emplid %in% prereq.stu)
course.df <- left_join(course.df, prereq.df, by = "emplid")
return(course.df)
}
# Get only the variables that have missing values ---------------------------------------------
get.missing.only <- function(course.df) {
get.missing.only <- course.df %>%
summarise(across(everything(), ~ sum(is.na(.x)))) %>%
gather() %>%
filter(value != 0)
get.missing.only <- course.df %>%
dplyr::select(all_of(get.missing.only$key))
return(get.missing.only)
}
# Get imbalanced variables with SMD > 0.1------------------------------------
get.imbal.vars <- function(tab)
{
get.imbal.vars <- as.data.frame(ExtractSmd(tab))
get.imbal.vars <- get.imbal.vars %>%
rownames_to_column(var = "Variable") %>%
rename(`Before Matching SMD` = `1 vs 2`) %>%
filter(`Before Matching SMD` > 0.1) %>%
arrange(desc(`Before Matching SMD`))
get.imbal.vars <- kable(
get.imbal.vars, caption = "Variables with SMD > 0.1"
) %>%
kable_styling(full_width= F)
return(get.imbal.vars)
}
# Unadjusted means -------------------------------------------------------------
get.unadj.means <- function(df.final)
{
get.unadj.means <- df.final %>%
group_by(palN) %>% summarise(unadj.means = mean(grd.pt.unt)) %>%
pivot_wider(names_from = "palN", values_from = "unadj.means") %>%
rename(`Non-PAL`= `0`, `PAL`= `1`) %>%
mutate(Diff. = `PAL`-`Non-PAL`)
get.unadj.means<- kable(
get.unadj.means, caption = "Unadjusted Mean Grades"
) %>%
kable_styling(full_width= F)
return(get.unadj.means)
}
# Adjusted means --------------------------------------------------------------
adj.means <- function(match.list, matched.dat) {
get.adj.means <- matched.dat %>%
group_by(palN) %>%
summarise(adj.means = weighted.mean(grd.pt.unt, match.list$weights)) %>%
pivot_wider(names_from = "palN", values_from = "adj.means") %>%
rename(`Non-PAL`= `0`, `PAL`= `1`) %>%
mutate(Diff. = `PAL`-`Non-PAL`)
# formatted table
get.adj.means<- kable(get.adj.means, caption = "Adjusted Mean Grades") %>%
kable_styling(full_width= F)
return(get.adj.means)
}
# Match Table ------------------------------------------------------------------
create.match.tab <- function(matched.dat) {
matched.dat <- matched.dat %>%
mutate(pal = if_else(palN == 0, "Non-PAL", "PAL"))
pal.flg <- c('Non-PAL', 'PAL')
for (i in seq_along(pal.flg)) {
multiple.matches <- matched.dat %>%
filter(pal ==pal.flg[i]) %>%
count(id) %>%
filter(n> 1) %>%
summarise(n())
single.matches <- matched.dat %>%
filter(pal == pal.flg[i]) %>%
count(id) %>%
filter(n==1) %>%
summarise(n())
if(pal.flg[i] == 'Non-PAL') {
match.tab <- bind_rows(single.matches, multiple.matches)
match.tab <- match.tab %>%
rename('Non-PAL'= 'n()')
}
pal.matches <- bind_rows(single.matches, multiple.matches)
match.tab$PAL <- pal.matches$`n()`
row.names(match.tab) <- c("Single Matches", "Multiple Matches")
}
match.tab <-rbind(
match.tab, "Total Students" = c(sum(match.tab$`Non-PAL`), sum(match.tab$`PAL`))
)
match.tab <- kable(match.tab, caption = "PAL and Non-PAL Matches") %>%
kable_styling(full_width= F)
return(match.tab)
}
# ATT plot ---------------------------------------------------------------------
# https://livefreeordichotomize.com/2019/01/17/understanding-propensity-score-weighting/
# https://www.csus.edu/brand/colors.html
get.att.plot <- function(df.final, match.list)
{
df.final$p.score <- p.score
df.final <- df.final %>%
select(-id) %>%
rownames_to_column(var = "id")
ps.dat <- df.final %>%
select(id, palN, p.score) %>%
pivot_wider(
names_from = "palN", values_from = "p.score", names_prefix = "b.pal."
)
before.match <- ps.dat %>%
select(b.pal.0, b.pal.1)
matched.dat <- df.final[unlist(match.list[c("index.treated", "index.control")]), ]
matched.dat$match.weights<- c(match.list$weights, match.list$weights)
after.match <-matched.dat %>%
select(-id) %>%
rownames_to_column(var = "id")
after.match <- after.match %>%
pivot_wider(names_from = "palN", values_from = "p.score", names_prefix = "pal.")
after.match <- after.match %>%
select(pal.0, pal.1, match.weights)
get.att.plot <- ggplot() +
geom_histogram(data = before.match, bins = 50, aes(b.pal.1), alpha = 0.5) +
geom_histogram(data = after.match,bins = 50, aes(pal.1, weight = match.weights),
fill = "#043927", alpha = 0.5) +
geom_histogram(data = before.match, bins = 50, alpha = 0.5,
aes(x = b.pal.0, y = -..count..)) +
geom_histogram(data = after.match, bins = 50,
aes(x = pal.0, weight = match.weights, y = -..count..),
fill = "#c4b581", alpha = 0.5) +
ylab("Count") + xlab("Propensity Scores") +
geom_hline(yintercept = 0, lwd = 0.5) +
scale_y_continuous(label = abs)
return(get.att.plot)
}
# Variable Percent Improvement -------------------------------------------------
get.var.perc.tab <- function(list.bal) {
get.var.perc.tab <- list.bal %>%
pluck("Balance") %>%
rownames_to_column("Variable") %>%
dplyr::select("Variable", "Type", "Diff.Un","Diff.Adj") %>%
mutate(`% Improvement` = if_else(Diff.Un == 0, 0, round(((abs(Diff.Un) - abs(Diff.Adj))/ abs(Diff.Un)) * 100 , 0))) %>%
arrange(desc(`% Improvement`))
get.var.perc.tab <- get.var.perc.tab %>% dplyr::select("Variable", "Diff.Un", "Diff.Adj", `% Improvement`)
return(get.var.perc.tab)
}
# Covariate Balance Plots -------------------------------------------------------
# https://cran.r-project.org/web/packages/tableone/vignettes/smd.html
# https://www.csus.edu/brand/colors.html
get.bal.plot <- function(unmatched.tab, matched.tab) {
## Construct a data frame containing variable name and SMD from all methods
dataPlot <- data.frame(variable = rownames(ExtractSmd(unmatched.tab)),
Unmatched = as.numeric(ExtractSmd(unmatched.tab)),
Matched = as.numeric(ExtractSmd(matched.tab)) )
## Create long-format data for ggplot2
dataPlotMelt <- melt(data = dataPlot,
id.vars = c("variable"),
variable.name = "Method",
value.name = "SMD")
## Order variable names by magnitude of SMD
varNames <- as.character(dataPlot$variable)[order(dataPlot$Unmatched)]
## Order factor levels in the same order
dataPlotMelt$variable <- factor(dataPlotMelt$variable,
levels = varNames)
## Plot using ggplot2
# Sac State colors and dashed line
get.bal.plot <-ggplot(
data = dataPlotMelt, mapping =
aes(x = variable, y = SMD, group = Method, color= Method)) +
scale_color_manual(values = c("#043927", "#c4b581")) +
geom_line(aes(linetype = Method)) +
geom_point() +
scale_linetype_manual(values= c("dashed", "solid")) +
geom_hline(yintercept = 0.1, color = "black", size = 0.1) +
coord_flip() +
theme_bw() + theme(legend.key = element_blank())
return(get.bal.plot)
}
# PAL Effect -------------------------------------------------------------------
get.pal.effect <- function(match.list, matched.dat, course) {
get.gamma <- psens(match.list, Gamma=2.0, GammaInc = 0.1)[["bounds"]] %>%
filter(`Lower bound` < 0.05 & 0.05 < `Upper bound`) %>%
slice_min(Gamma) %>%
select(Gamma)
get.pal.effect <- matched.dat %>%
group_by(palN) %>%
summarise(adj.means = weighted.mean(grd.pt.unt, match.list$weights)) %>%
pivot_wider(names_from = "palN", values_from = "adj.means") %>%
rename(`Non-PAL`= `0`, `PAL`= `1`) %>%
mutate(Course= course, .before= "Non-PAL") %>%
mutate(Diff. = `PAL`-`Non-PAL`) %>%
mutate(`Std. error`= match.list$se, .after= "Diff.") %>%
mutate(
`p-val`= formatC( 1-pnorm(Diff./`Std. error`), format = "e", digits = 2),
Sensitivity= get.gamma$Gamma,
`N(non-PAL)`= length(unique(match.list$index.control)),
`N(PAL)`= match.list$wnobs
)
return(get.pal.effect)
}
Specialized functions for each course.
## BIO 22 ====================================================================
## Filter to relevant variables
bio22.step.vars <- function(course.df) {
vars.to.keep <- c(
'acad.stand', 'adm.area', 'bot.level','cMaj', 'coh', 'course.age', 'course.count',
'csus.gpa.start', 'cum.percent.units.passed', 'delay.from.hs', 'e.rmd',
'eth.erss', 'father.ed', 'fys.flg','gender', 'hous.coh.term.flg', 'hs.gpa',
'Instructor_01', 'median.income','m.rmd', 'mother.ed', 'pct.female.head',
'pell.coh.term.flg', 'prevPAL', 'prevPASS', 'reason', 'sac.county.flg',
'term.units.attemptedCensus', 'palN', 'grd.pt.unt', 'sat.math.score',
'sat.math.flg', 'sat.verbal.score', 'sat.verbal.flg', 'AP_BIOL', 'AP_CALAB',
'AP_CALBC', 'AP_CHEM','AP_BIOL.flg', 'AP_CALAB.flg', 'AP_CALBC.flg',
'AP_CHEM.flg', 'pct.female.head.flg', 'med.inc.flg'
)
new.vars <- intersect(vars.to.keep, names(bio22.dat))
bio22.final <- bio22.dat[ ,new.vars]
return(bio22.final)
}
## Build a Logistic Regression Model for Propensity Score
## Fit Propensity Score model (linear terms only)
bio22.step <- function(final.df) {
# AP_CALAB
min.model <- glm(
palN ~ cum.percent.units.passed + eth.erss + gender + sat.math.score +
sat.verbal.score + sat.math.flg + AP_CALAB + AP_CALAB.flg,
data= bio22.final, family=binomial
)
summary(min.model)
biggest <- formula(glm(palN ~. - grd.pt.unt, data=bio22.final, family=binomial))
bio22.step.first.order <- step(
min.model, direction="forward", scope = biggest, trace=FALSE, k=2)
summary(bio22.step.first.order)
bio22.step.first.order$anova
model.first.order <- formula(bio22.step.first.order)
bio22.first.order.prop.model <- glm(
model.first.order, data=bio22.final, family=binomial
)
return(bio22.first.order.prop.model)
}
## CHEM 1A ====================================================================
## Filter to relevant variables
chem1a.step.vars <- function(course.df) {
vars.to.keep <- c(
'acad.stand', 'adm.area', 'bot.level','cMaj', 'coh', 'course.age',
'csus.gpa.start', 'cum.percent.units.passed', 'delay.from.hs', 'e.rmd',
'eth.erss', 'father.ed','fys.flg','gender', 'hous.coh.term.flg', 'hs.gpa',
'Instructor_01', 'median.income','m.rmd', 'mother.ed', 'pct.female.head',
'pell.coh.term.flg', 'prevPAL', 'prevPASS', 'reason', 'sac.county.flg',
'term.units.attemptedCensus','palN', 'grd.pt.unt', 'sat.math.score',
'sat.math.flg', 'sat.verbal.score', 'sat.verbal.flg', 'AP_BIOL', 'AP_CALAB',
'AP_CALBC', 'AP_CHEM','AP_BIOL.flg', 'AP_CALAB.flg', 'AP_CALBC.flg',
'AP_CHEM.flg', 'pct.female.head.flg', 'med.inc.flg'
)
new.vars <- intersect(vars.to.keep, names(chem1a.dat))
chem1a.final <- chem1a.dat[ ,new.vars]
return(chem1a.final)
}
## Build a Logistic Regression Model for Propensity Score
## Fit Propensity Score model (linear terms only)
chem1a.step <- function(final.df) {
# Stepwise selection selected AP_CALAB.flg, AP_BIOL.flg, AP_CHEM, and
# AP_CHEM.flg
min.model <- glm(
palN ~ cum.percent.units.passed + eth.erss + gender + sat.math.score +
sat.verbal.score + sat.math.flg + AP_CALAB + AP_CALAB.flg + AP_BIOL +
AP_BIOL.flg + AP_CHEM + AP_CHEM.flg, data= chem1a.final, family=binomial
)
summary(min.model)
biggest <- formula(
glm(palN ~. - grd.pt.unt, data=chem1a.final, family=binomial)
)
chem1a.step.first.order <- step(
min.model, direction="forward", scope = biggest, trace=FALSE, k=2
)
summary(chem1a.step.first.order)
chem1a.step.first.order$anova
model.first.order <- formula(chem1a.step.first.order)
chem1a.first.order.prop.model <- glm(
model.first.order, data=chem1a.final, family=binomial
)
return(chem1a.first.order.prop.model)
}
## CHEM 1B ====================================================================
## Filter to relevant variables
chem1b.step.vars <- function(course.df) {
vars.to.keep <- c(
'acad.stand', 'adm.area', 'bot.level','cMaj', 'coh', 'course.age',
'csus.gpa.start', 'cum.percent.units.passed', 'delay.from.hs', 'e.rmd',
'eth.erss', 'father.ed', 'fys.flg','gender', 'hous.coh.term.flg', 'hs.gpa',
'Instructor_01','median.income','m.rmd', 'mother.ed', 'pct.female.head',
'pell.coh.term.flg', 'prevPAL', 'prevPASS', 'reason', 'sac.county.flg',
'term.units.attemptedCensus', 'palN', 'grd.pt.unt', 'chem1a.grd.pt.unt',
'AP_BIOL', 'AP_CALAB', 'AP_CALBC', 'AP_CHEM','AP_BIOL.flg', 'AP_CALAB.flg',
'AP_CALBC.flg', 'AP_CHEM.flg', 'pct.female.head.flg', 'med.inc.flg'
)
new.vars <- intersect(vars.to.keep, names(chem1b.dat))
chem1b.final <- chem1b.dat[ ,new.vars]
return(chem1b.final)
}
## Build a Logistic Regression Model for Propensity Score
## Fit Propensity Score model (linear terms only)
chem1b.step <- function(final.df) {
# Stepwise selection selected AP_BIOL.flg and AP_CHEM.flg
# Removed AP_BIOL.flg. Then stepwise selection selected AP_CALAB.flg.
# Removed AP_CALAB.flg and pct.female.head.flg
min.model <- glm(
palN ~ chem1a.grd.pt.unt + cum.percent.units.passed + eth.erss + gender +
AP_CHEM + AP_CHEM.flg, data= chem1b.final, family=binomial
)
summary(min.model)
biggest <- formula(
glm(palN ~. - grd.pt.unt - AP_BIOL.flg - AP_CALAB.flg - pct.female.head.flg,
data=chem1b.final, family=binomial)
)
chem1b.step.first.order <- step(min.model,
direction="forward",scope = biggest,
trace=FALSE, k=2)
summary(chem1b.step.first.order)
chem1b.step.first.order$anova
model.first.order <- formula(chem1b.step.first.order)
chem1b.first.order.prop.model <- glm(model.first.order, data=chem1b.final, family=binomial)
return(chem1b.first.order.prop.model)
}
## CHEM 4 ====================================================================
## Filter to relevant variables
chem4.step.vars <- function(course.df)
{
vars.to.keep <- c(
'acad.stand', 'adm.area', 'bot.level','cMaj', 'coh', 'course.age',
'csus.gpa.start', 'cum.percent.units.passed', 'delay.from.hs', 'e.rmd',
'eth.erss', 'father.ed','fys.flg','gender', 'hous.coh.term.flg', 'hs.gpa',
'Instructor_01','median.income','m.rmd', 'mother.ed', 'pct.female.head',
'pell.coh.term.flg', 'prevPAL', 'prevPASS', 'reason', 'sac.county.flg',
'term.units.attemptedCensus', 'palN', 'grd.pt.unt','sat.math.score',
'sat.math.flg', 'sat.verbal.score', 'sat.verbal.flg', 'AP_BIOL', 'AP_CALAB',
'AP_CALBC', 'AP_CHEM','AP_BIOL.flg', 'AP_CALAB.flg', 'AP_CALBC.flg',
'AP_CHEM.flg', 'pct.female.head.flg', 'med.inc.flg'
)
new.vars <- intersect(vars.to.keep, names(chem4.dat))
chem4.final <- chem4.dat[ ,new.vars]
return(chem4.final)
}
## Build a Logistic Regression Model for Propensity Score
## Fit Propensity Score model (linear terms only)
chem4.step <- function(final.df)
{
# "AP_BIOL"
min.model <- glm(
palN ~ cum.percent.units.passed + eth.erss + gender+ sat.math.score +
sat.verbal.score+sat.math.flg + AP_CALAB+AP_CALAB.flg, data= chem4.final,
family=binomial
)
summary(min.model)
biggest <- formula(
glm(palN ~. - grd.pt.unt - AP_BIOL, data=chem4.final, family=binomial)
)
chem4.step.first.order <- step(
min.model, direction="forward", scope = biggest, trace=FALSE, k=2)
summary(chem4.step.first.order)
chem4.step.first.order$anova
model.first.order <- formula(chem4.step.first.order)
chem4.first.order.prop.model <- glm(
model.first.order, data=chem4.final, family=binomial
)
return(chem4.first.order.prop.model)
}
## CHEM 24 ====================================================================
## Filter to relevant variables
chem24.step.vars <- function(course.df)
{
vars.to.keep <- c(
'acad.stand', 'adm.area', 'bot.level','cMaj', 'coh', 'course.age',
'csus.gpa.start', 'cum.percent.units.passed', 'delay.from.hs', 'e.rmd',
'eth.erss', 'father.ed', 'fys.flg','gender', 'hous.coh.term.flg', 'hs.gpa',
'Instructor_01', 'median.income','m.rmd', 'mother.ed', 'pct.female.head',
'pell.coh.term.flg', 'prevPAL', 'prevPASS', 'reason', 'sac.county.flg',
'term.units.attemptedCensus', 'palN', 'grd.pt.unt', 'chem1b.grd.pt.unt',
'chem1b.term.gpa', 'chem1b.units.attempted', 'AP_BIOL', 'AP_CALAB',
'AP_CALBC', 'AP_CHEM','AP_BIOL.flg', 'AP_CALAB.flg', 'AP_CALBC.flg',
'AP_CHEM.flg', 'pct.female.head.flg', 'med.inc.flg'
)
new.vars <- intersect(vars.to.keep, names(chem24.dat))
chem24.final <- chem24.dat[ ,new.vars]
return(chem24.final)
}
## Build a Logistic Regression Model for Propensity Score
## Fit Propensity Score model (linear terms only)
chem24.step <- function(final.df) {
min.model <- glm(
palN ~ chem1b.grd.pt.unt + cum.percent.units.passed + eth.erss + gender,
data= chem24.final, family=binomial
)
summary(min.model)
biggest <- formula(
glm(palN ~.- grd.pt.unt - acad.stand - reason - pct.female.head.flg,
data=chem24.final, family=binomial)
)
chem24.step.first.order <- step(
min.model, direction="forward", scope = biggest, trace=FALSE, k=2
)
summary(chem24.step.first.order)
chem24.step.first.order$anova
model.first.order <- formula(chem24.step.first.order)
chem24.first.order.prop.model <- glm(
model.first.order, data=chem24.final, family=binomial
)
return(chem24.first.order.prop.model)
}
Make sure the PAL datafile is in the same directory as this RMarkdown file.
PALdatafull <- read_rds("paldatafull_csv.rds")
dim(PALdatafull)
## [1] 1099371 174
sum(PALdatafull$grd.pt.unt)
## [1] 2237555
The file, which includes data through the Spring 2019 semester, has 1099371 rows and 174 columns. The total of the grd.pt.unt column is 2237555.
chem.classes <- paste("CHEM", c(4, '1A', '1B', 24))
chem.dat <- PALdatafull %>%
filter(base.time.course == 1, course %in% chem.classes) %>%
mutate(course = factor(course, levels = chem.classes))
dim(chem.dat) # 18948 174
## [1] 18948 174
num.stu <- dim(chem.dat)[1]
num.vars <- dim(chem.dat)[2]
There are 18948 rows and 174 variables. Each row corresponds to a chemistry student, so there are a total of 18948 chemistry students.
There are 83 first-attempt-only Chem 24 Spring 2019 students. Some of them are incorrectly labeled as non-PAL and need to be relabeled.
with(chem.dat %>%
filter(base.time.course == 1, pass.term.flg == "PASS Term", course == "CHEM 24", term == "Spring 2019", course.seq == 0),
table(palN))
## palN
## 0
## 83
chem.dat <- update.chem24s19(chem.dat)
with(chem.dat %>%
filter(base.time.course == 1, pass.term.flg == "PASS Term", course == "CHEM 24", term == "Spring 2019", course.seq == 0),
table(palN))
## palN
## 0 1 2
## 28 9 46
After relabeling, there are 28 non-PAL students, 9 incomplete PAL students, and 46 PAL students for Chem 24 Spring 2019.
The course.seq variable indicates how many times a student has taken a course prior to the current attempt. To filter on the first attempt at a course, we set course.seq to 0, as in the sketch below.
Note: Excludes incomplete PAL students
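For reference, the first-attempt filter used inside get.raw.tab() (and again when each course's dataset is built below) is simply course.seq == 0; for example (chem1a.first is just an illustrative name):
# Keep only first attempts: course.seq == 0 means no prior attempts at the course
chem1a.first <- subset(chem.dat, course == "CHEM 1A" & course.seq == 0)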
get.raw.tab(chem.classes, chem.dat)
class | nonPALavg | PALavg | Diff | NonPAL_Num | PAL_Num | CompletePAL | TermPALStart |
---|---|---|---|---|---|---|---|
CHEM 4 | 2.03 | 2.39 | 0.36 | 1929 | 759 | 0.28 | 2123 |
CHEM 1A | 1.63 | 2.04 | 0.41 | 1717 | 1055 | 0.37 | 2128 |
CHEM 1B | 1.70 | 2.11 | 0.41 | 1090 | 769 | 0.40 | 2138 |
CHEM 24 | 1.63 | 2.06 | 0.43 | 224 | 177 | 0.43 | 2178 |
Create new variables.
delay.from.hs: delay since high school
cum.percent.units.passed: cumulative percent of units passed
cMaj: census majors without concentrations/specializations/tracks/etc.
county: which county did the student live in at the time of application to Sac State
sac.county.flg: did the student live in Sacramento county at the time of application to Sac State
Collapse sparse categories and perform other miscellaneous data clean-up. Sparse categories can cause complete separation in logistic regression and are only predictive for a few students.
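As a toy illustration (hypothetical data, not from the PAL dataset) of why sparse categories are a problem: a level with only a couple of students who are all PAL (or all non-PAL) perfectly predicts palN and produces complete-separation warnings in glm(). Collapsing such levels into an "Other" category, as is done per course below with group_category() and fct_other()/fct_collapse(), avoids this:
# Hypothetical toy data: the "Dance" major has only 2 students, both PAL
toy <- data.frame(
  palN = c(rep(c(0, 1), each = 50), 1, 1),
  cMaj = c(rep("Biology", 100), "Dance", "Dance")
)
# glm(palN ~ cMaj, data = toy, family = binomial)  # warns: fitted probabilities of 0 or 1
# Lump levels with fewer than 5 students into "OTHER"
toy$cMaj <- forcats::fct_lump_min(factor(toy$cMaj), min = 5, other_level = "OTHER")
table(toy$cMaj, toy$palN)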
# Check how many students did not complete PAL
sum(chem.dat$palN==1) # 226
## [1] 226
incl.pal.stu <- sum(chem.dat$palN==1)
chem.dat <- clean.data(chem.dat)
dim(chem.dat) # 18722 179
## [1] 18722 179
There were 226 chemistry students who did not complete PAL and were removed from the analysis. There are now 18722 chemistry students instead of 18948.
There were originally 174 variables in the data set, 5 variables were added, so there are now 179 total variables in the data set.
Based on data for 1055 PAL students and 1717 non-PAL students, the unadjusted, raw difference in average grade for PAL and non-PAL students was 0.41 on an A = 4.0 grade scale. However, since students self-select into supplemental PAL instruction, it is possible that the resulting PAL and non-PAL groups were not balanced with respect to other characteristics which could impact course grade. For example, if students with better study habits tend to enroll in PAL, then, all else being equal, the PAL mean grade would be higher than the non-PAL mean even if PAL had no effect on course grade. Consequently, we also performed a propensity score analysis to adjust the estimated effect of PAL on course grade for potential self-selection biases.
After adjusting for self-selection bias, we found that PAL students earned an average grade \(0.50\pm 0.07\) higher than non-PAL students. A sensitivity analysis indicates that this analysis is moderately sensitive to unknown confounders. Although the data give us sufficient evidence to conclude that PAL increases students’ grades in Chem 1A, the existence of an unknown confounder similar in magnitude to living in on-campus housing during their first year, ethnicity, or major would nullify that conclusion.
A propensity score analysis was conducted to assess the effect of PAL supplemental instruction on Chem 1A course grade. Propensity score adjustment was necessary since the data are observational and the characteristics of students who voluntarily enroll in PAL may differ in ways that may, independently of PAL, impact course grade compared to students who do not enroll in PAL. In propensity score analysis, variables related to both likelihood of PAL enrollment and course grade (confounders) are used in a logistic regression model to obtain a propensity score, which is a student’s likelihood of enrolling in PAL.
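In outline, the propensity score is the fitted probability from a logistic regression of PAL enrollment (palN) on the confounders. A minimal sketch, using an illustrative subset of covariates and the CHEM 1A analysis dataset (chem1a.final) constructed below; the full models are built by the course-specific step functions above:
# Sketch only: logistic model for PAL enrollment; the fitted values serve as
# propensity scores (covariates shown are an illustrative subset)
prop.model.sketch <- glm(palN ~ csus.gpa.start + gender + eth.erss + prevPAL,
                         data = chem1a.final, family = binomial)
p.score.sketch <- prop.model.sketch$fitted.values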
For Chem 1A, 19 covariates were found to have a statistically significant relationship to likelihood of enrolling in PAL. Variables related to increased likelihood of enrolling were: Hispanic ethnicity, female gender, lower SAT scores, has an AP Calculus exam score, has an AP Biology exam score, has an AP Chemistry exam score, CSUS GPA at start of term, enrollment in PAL in the past, academic major, higher term units attempted, lower high school GPA, and fewer years between first term at CSUS and high school graduation.
Using the propensity score model, all students in the dataset, PAL and non-PAL, are assigned a propensity score. Then, each PAL student is matched to one or more non-PAL students who have similar propensity scores. After matching, the PAL and matched non-PAL groups are compared to determine whether the distribution of each covariate is similar between the two groups. This is called a balance check. If the standardized difference between the non-PAL and PAL means is less than 0.10, then the strong criterion in Leite (2017, p. 10) is met for covariate balance. If the standardized difference is under 0.25, then a more lenient criterion is met. The largest absolute standardized mean difference in this analysis is 0.0627. Consequently, adequate balance appears to have been achieved.
The difference in the average grade for the matched PAL and non-PAL data is then calculated. The estimated increase in the mean grade of students in PAL over those not in PAL after correcting for self-selection biases is \(0.50\pm 0.07\) or between 0.43 and 0.57 on a 4.0 grade scale. This result is statistically significant with a P-value of \(2.43\times 10^{-13}\) and is based on 757 PAL students and 711 non-PAL students. For comparison, the non-propensity score adjusted difference in average grade for PAL and non-PAL students was 0.41.
The estimated PAL effect is based on the assumption that the propensity model includes all potential confounders for PAL enrollment and grade in Chem 1A. However, it is possible that unknown confounders exist. A sensitivity analysis was conducted to determine how strong an unknown confounder must be to nullify the statistically significant PAL effect that was found in this analysis. The sensitivity analysis (Rosenbaum, 2002) indicated that an unknown confounder which increases the odds of being in PAL by more than 1.8 is enough to change the treatment effect from significant to non-significant. Inspection of the covariates in the estimated propensity model for Chem 1A indicates that if there is an unknown confounder that has an effect on the propensity score similar to the effect of CSUS GPA at start of term, major, or has an AP Chemistry exam score observed in this analysis, the PAL effect would become non-significant. Thus, this finding is sensitive to unknown confounders. It is possible a variable like the number of hours per week a student works (which is not in our dataset) is an unknown confounder which could reverse the statistical significance of this analysis.
Additionally, a number of variables were removed from this analysis due to large amounts of missingness. Since all students who had missing information on any included covariate were eliminated from the analysis, a balance had to be struck between retaining a sufficiently large pool of PAL and non-PAL students and retaining a sufficient number of important covariates. Variables which were eliminated from this analysis had substantial missing data or were subjectively judged as unlikely to be confounding. The choices about which variables to retain resulted in the original pool of 1055 PAL students in Chem 1A being reduced to 757. Also, 711 non-PAL students were selected out of 1717 original non-PAL students.
When a PAL student had more than one suitable match among the non-PAL students, all non-PAL students were taken as matches and weighted appropriately in the final estimated PAL effect. There were 1561 non-PAL matches. Of the 757 PAL students, 337 were matched one-to-one with non-PAL students and 420 were matched one-to-many with non-PAL students.
The non-PAL and PAL groups include only students making their first attempt at CHEM 1A. They also include students with previous PAL participation and/or students who are currently in PAL for another course.
# Excludes course repeats
chem1a.dat <- chem.dat %>%
filter(course=="CHEM 1A", pass.term.flg == "PASS Term", course.seq== 0)
dim(chem1a.dat) # 2772 179
## [1] 2772 179
There are 2,772 CHEM 1A first attempt only students.
Collapse the ‘cMaj’ variable separately for each course, since the amount of collapsing necessary will vary by course.
# Collapsed cMaj categories to Biology and Other majors at 0.09
with(chem1a.dat, table(cMaj, palN))
## palN
## cMaj 0 1
## Anthropology 3 5
## Biology 659 547
## Business 8 4
## Chemistry 265 104
## Child Devel/Early Childhood Ed 9 2
## Civil Engineering 50 16
## Communications 4 2
## Computer Engineering 13 2
## Computer Science 15 4
## Criminal Justice 5 6
## Dance 0 1
## Electrical Engineering 21 4
## English 3 1
## Environmental Studies 28 15
## Film 0 1
## French 1 0
## Geography 0 1
## Geology 39 16
## Gerontology 2 0
## Health Science 22 8
## History 3 2
## Interdisciplinary Studies/Special Major 1 0
## Kinesiology/Physical Education 226 164
## Liberal Studies 1 2
## Mathematics 7 1
## Mechanical Engineering 73 17
## Music 4 1
## Nursing 40 22
## Nutrition 77 55
## Philosophy 3 0
## Physical Science 0 1
## Physics 42 7
## Political Science 0 1
## Psychology 24 9
## Social Science 2 1
## Sociology 2 1
## Spanish 3 1
## Speech Pathology 2 2
## Undeclared 45 27
chem1a.dat <- group_category(data = chem1a.dat, feature = "cMaj", threshold = 0.09, update = TRUE)
with(chem1a.dat, table(cMaj, palN))
## palN
## cMaj 0 1
## Biology 659 547
## Chemistry 265 104
## Civil Engineering 50 16
## Geology 39 16
## Kinesiology/Physical Education 226 164
## Mechanical Engineering 73 17
## Nursing 40 22
## Nutrition 77 55
## OTHER 201 80
## Physics 42 7
## Undeclared 45 27
Remove variables having too many missing values in order to retain a larger pool of PAL and non-PAL students.
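The chunk that produced the missingness summary below is not shown; a minimal sketch using DataExplorer::profile_missing() with a 10% cutoff (object names here are illustrative) would be:
# Summarize missingness per variable and flag those missing more than 10%
missing.summary <- profile_missing(chem1a.dat)
high.missing <- missing.summary %>%
  filter(pct_missing > 0.10) %>%
  arrange(desc(pct_missing))
nrow(high.missing)   # number of variables missing more than 10%
high.missing
# (The actual chunk then drops the flagged variables, except a handful of
# important ones that were force-included; that list is not shown here.)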
## [1] 38
## feature num_missing pct_missing
## 17 Instructor_02 2772 1.0000000
## 22 deg.plan3 2772 1.0000000
## 23 deg.plan4 2772 1.0000000
## 24 deg.plan5 2772 1.0000000
## 25 deg.plan6 2772 1.0000000
## 19 withdraw_reason 2762 0.9963925
## 21 deg.plan2 2739 0.9880952
## 4 pledge.term 2366 0.8535354
## 11 trf.gpaADM 2308 0.8326118
## 1 fys.term.code 1630 0.5880231
## 2 fys.grd 1630 0.5880231
## 3 fys.rpt.flg 1630 0.5880231
## 27 grad.termERS 1453 0.5241703
## 20 deg.plan1 1415 0.5104618
## 26 grad.term 1415 0.5104618
## 28 ttg 1415 0.5104618
## 18 treat.section 1237 0.4462482
## 31 plan.college 925 0.3336941
## 32 plan.college.desc 925 0.3336941
## 33 plan.dept 925 0.3336941
## 34 plan.deptAbbr 925 0.3336941
## 35 plan.degree 925 0.3336941
## 36 plan.type 925 0.3336941
## 5 sat.math.score 616 0.2222222
## 6 sat.math.flg 616 0.2222222
## 7 sat.verbal.score 616 0.2222222
## 8 sat.verbal.flg 616 0.2222222
## 9 sat.test.date 616 0.2222222
## 13 ge.critical.thinking.status 524 0.1890332
## 14 ge.english.comp.status 524 0.1890332
## 15 ge.math.status 524 0.1890332
## 16 ge.oral.comm.status 524 0.1890332
## 12 admit.term 511 0.1843434
## 10 hs.gpa 420 0.1515152
## 38 county 355 0.1280664
## 29 tot.passd.prgrss.start 325 0.1172439
## 30 tot.taken.prgrss.start 325 0.1172439
## 37 cum.percent.units.passed 325 0.1172439
## [1] 2772 146
38 variables are missing more than 10% of their values.
5 of those 38 variables were judged important and were force-included despite the missingness.
So, 33 variables were removed due to missingness, and there are now 146 variables instead of 179.
chem1a.dat <- chem1a.dat[complete.cases(chem1a.dat), ]
dim(chem1a.dat) # 1769 146
## [1] 1769 146
1769 out of 2772 students are kept.
1003 students were removed due to missing values.
single.vars <- chem1a.dat %>%
summarise(across(everything(), ~ n_distinct(.x))) %>%
select_if(. == 1)
# Table of variables with single values
CreateTableOne(vars = names(single.vars), data = chem1a.dat)
##
## Overall
## n 1769
## country = USA (%) 1769 (100.0)
## career.course = UGRD (%) 1769 (100.0)
## acad.prog.course = UGD (%) 1769 (100.0)
## course (%)
## CHEM 4 0 ( 0.0)
## CHEM 1A 1769 (100.0)
## CHEM 1B 0 ( 0.0)
## CHEM 24 0 ( 0.0)
## component = LEC (%) 1769 (100.0)
## units (mean (SD)) 5.00 (0.00)
## course.numeric (mean (SD)) 1.00 (0.00)
## div = Lower Division (%) 1769 (100.0)
## Instructor_01 (%)
## 1 0 ( 0.0)
## 2 0 ( 0.0)
## 3 0 ( 0.0)
## 4 0 ( 0.0)
## 5 0 ( 0.0)
## 6 1769 (100.0)
## 7 0 ( 0.0)
## 8 0 ( 0.0)
## 9 0 ( 0.0)
## 10 0 ( 0.0)
## 11 0 ( 0.0)
## 12 0 ( 0.0)
## 13 0 ( 0.0)
## 14 0 ( 0.0)
## 15 0 ( 0.0)
## 16 0 ( 0.0)
## 17 0 ( 0.0)
## 18 0 ( 0.0)
## 19 0 ( 0.0)
## 20 0 ( 0.0)
## 21 0 ( 0.0)
## 22 0 ( 0.0)
## course.seq (mean (SD)) 0.00 (0.00)
## rpt.flg = First Attempt (%) 1769 (100.0)
## c2s = Non-C2S (%) 1769 (100.0)
## base.time.course (mean (SD)) 1.00 (0.00)
## years (mean (SD)) 0.50 (0.00)
## withdraw_code = NWD (%) 1769 (100.0)
## enrl.flg = Enrolled (%) 1769 (100.0)
## enrl.flgERS = Enrolled (%) 1769 (100.0)
## rtn.flg = Retained (%) 1769 (100.0)
## rtn.flgERS = Retained (%) 1769 (100.0)
## pass.term.flg = PASS Term (%) 1769 (100.0)
## csus.gpa.start.flg = Not Missing (%) 1769 (100.0)
## higher.ed.gpa.start.flg = Not Missing (%) 1769 (100.0)
sum(single.vars) # 22
## [1] 22
# remove single-valued variables
chem1a.dat<- chem1a.dat %>%
dplyr::select(-names(single.vars))
dim(chem1a.dat) # 1769 124
## [1] 1769 124
124 out of 146 variables are kept
22 variables removed due to single values
# Remove non chem1a instructors
chem1a.dat <- chem1a.dat %>%
droplevels(chem1a.dat$Instructor_01)
# Combine sparse ethnicity categories to Other
chem1a.dat <- chem1a.dat %>%
mutate(eth.erss = fct_collapse(eth.erss, `Other` = c("Foreign", "Native American", "Pacific Islander")))
with(chem1a.dat, table(eth.erss, palN))
## palN
## eth.erss 0 1
## African American 44 30
## Asian 336 237
## Other 20 21
## Hispanic 250 242
## Two or More Races 81 40
## Unknown 38 15
## White 238 177
# Collapse sparse categories for acad.stand
# Other: Academic Dismissal, Academic Disqualification
chem1a.dat <- chem1a.dat %>%
mutate(acad.stand = fct_other(acad.stand, keep = c("Good Standing")))
with(chem1a.dat, table(acad.stand, palN))
## palN
## acad.stand 0 1
## Good Standing 955 748
## Other 52 14
Subjective judgment was used to narrow the pool of variables down to those likely to be confounders. It is important to include all variables correlated with the outcome even if it is uncertain whether they are related to the likelihood of enrolling in PAL. This allows for a more precise estimate of the treatment effect.
chem1a.final <- chem1a.step.vars(chem1a.dat)
kable(names(chem1a.final))
x |
---|
acad.stand |
adm.area |
bot.level |
cMaj |
coh |
course.age |
csus.gpa.start |
cum.percent.units.passed |
delay.from.hs |
e.rmd |
eth.erss |
father.ed |
fys.flg |
gender |
hous.coh.term.flg |
hs.gpa |
median.income |
m.rmd |
mother.ed |
pct.female.head |
pell.coh.term.flg |
prevPAL |
prevPASS |
reason |
sac.county.flg |
term.units.attemptedCensus |
palN |
grd.pt.unt |
sat.math.score |
sat.math.flg |
sat.verbal.score |
AP_BIOL |
AP_CALAB |
AP_CALBC |
AP_CHEM |
AP_BIOL.flg |
AP_CALAB.flg |
AP_CALBC.flg |
AP_CHEM.flg |
pct.female.head.flg |
med.inc.flg |
We subjectively identified the following potential confounders to force the model to retain: cum.percent.units.passed, gender, eth.erss, sat.math.score, sat.verbal.score, and sat.math.flg (same as sat.verbal.flg). Stepwise variable selection was then used to select which of the remaining variables in the PAL dataset to include in the propensity model.
chem1a.first.order.prop.model <- chem1a.step(chem1a.final)
summary(chem1a.first.order.prop.model)
##
## Call:
## glm(formula = model.first.order, family = binomial, data = chem1a.final)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -1.8322 -1.0126 -0.6089 1.0904 2.4921
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 2.2066673 1.3077056 1.687 0.09152 .
## cum.percent.units.passed 0.0185532 0.7023697 0.026 0.97893
## eth.erssAsian 0.2006993 0.2729443 0.735 0.46215
## eth.erssOther 0.4196394 0.4155541 1.010 0.31258
## eth.erssHispanic 0.4779929 0.2730791 1.750 0.08005 .
## eth.erssTwo or More Races -0.1297781 0.3288717 -0.395 0.69313
## eth.erssUnknown -0.2929460 0.4188639 -0.699 0.48431
## eth.erssWhite 0.3413619 0.2798446 1.220 0.22253
## genderMale -0.3338347 0.1175127 -2.841 0.00450 **
## sat.math.score -0.0031376 0.0009867 -3.180 0.00147 **
## sat.verbal.score -0.0024583 0.0009013 -2.728 0.00638 **
## sat.math.flgold -0.1286384 0.2106295 -0.611 0.54138
## AP_CALAB -0.0296383 0.1061827 -0.279 0.78015
## AP_CALAB.flgNot Missing -0.3448884 0.1673607 -2.061 0.03933 *
## AP_BIOL 0.0113165 0.1876565 0.060 0.95191
## AP_BIOL.flgNot Missing -0.3273270 0.1949865 -1.679 0.09321 .
## AP_CHEM -0.7434323 0.4691420 -1.585 0.11304
## AP_CHEM.flgNot Missing -1.1385896 0.4174880 -2.727 0.00639 **
## csus.gpa.start 0.7188766 0.1466119 4.903 9.43e-07 ***
## prevPAL 0.5319611 0.1061938 5.009 5.46e-07 ***
## cMajChemistry -0.7074529 0.1725795 -4.099 4.14e-05 ***
## cMajCivil Engineering -0.5976542 0.3925320 -1.523 0.12787
## cMajGeology -0.0987511 0.4921359 -0.201 0.84097
## cMajKinesiology/Physical Education -0.1260763 0.1522197 -0.828 0.40753
## cMajMechanical Engineering -0.8508806 0.3391102 -2.509 0.01210 *
## cMajNursing 0.0066722 0.3863916 0.017 0.98622
## cMajNutrition -0.0288961 0.2734756 -0.106 0.91585
## cMajOTHER -0.6094946 0.2157415 -2.825 0.00473 **
## cMajPhysics -0.7988319 0.4622694 -1.728 0.08398 .
## cMajUndeclared -0.4627973 0.3003148 -1.541 0.12331
## term.units.attemptedCensus 0.0710439 0.0279899 2.538 0.01114 *
## hs.gpa -0.2970032 0.1446650 -2.053 0.04007 *
## delay.from.hs -0.1154636 0.0537763 -2.147 0.03178 *
## acad.standOther -0.4841617 0.3451585 -1.403 0.16070
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 2418.3 on 1768 degrees of freedom
## Residual deviance: 2168.3 on 1735 degrees of freedom
## AIC: 2236.3
##
## Number of Fisher Scoring iterations: 5
p.score <- chem1a.first.order.prop.model$fitted.values
chem1a.covs <- names(chem1a.first.order.prop.model %>% pluck("model") %>% dplyr::select(-palN))
# Unadjusted mean grades
get.unadj.means(chem1a.final)
Non-PAL | PAL | Diff. |
---|---|---|
1.601986 | 1.974409 | 0.3724234 |
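The covariate summary below (stratified by palN, with standardized mean differences) comes from a chunk that is not shown; a sketch of how the unmatched.tab object referenced later is presumably created with tableone::CreateTableOne() is:
# Variable summary table for the unmatched data, stratified by PAL status
unmatched.tab <- CreateTableOne(vars = chem1a.covs, strata = "palN",
                                data = chem1a.final, smd = TRUE, test = FALSE)
print(unmatched.tab, smd = TRUE)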
## Stratified by palN
## 0 1 SMD
## n 1007 762
## cum.percent.units.passed (mean (SD)) 0.91 (0.10) 0.91 (0.09) 0.005
## eth.erss (%) 0.213
## African American 44 ( 4.4) 30 ( 3.9)
## Asian 336 (33.4) 237 (31.1)
## Other 20 ( 2.0) 21 ( 2.8)
## Hispanic 250 (24.8) 242 (31.8)
## Two or More Races 81 ( 8.0) 40 ( 5.2)
## Unknown 38 ( 3.8) 15 ( 2.0)
## White 238 (23.6) 177 (23.2)
## gender = Male (%) 471 (46.8) 241 (31.6) 0.314
## sat.math.score (mean (SD)) 529.74 (78.45) 499.23 (74.18) 0.400
## sat.verbal.score (mean (SD)) 499.95 (82.51) 477.60 (74.57) 0.284
## sat.math.flg = old (%) 942 (93.5) 702 (92.1) 0.055
## AP_CALAB (mean (SD)) 2.57 (0.62) 2.54 (0.47) 0.053
## AP_CALAB.flg = Not Missing (%) 197 (19.6) 94 (12.3) 0.198
## AP_BIOL (mean (SD)) 2.51 (0.31) 2.50 (0.29) 0.040
## AP_BIOL.flg = Not Missing (%) 109 (10.8) 61 ( 8.0) 0.097
## AP_CHEM (mean (SD)) 1.96 (0.23) 1.96 (0.13) 0.030
## AP_CHEM.flg = Not Missing (%) 54 ( 5.4) 16 ( 2.1) 0.173
## csus.gpa.start (mean (SD)) 3.11 (0.53) 3.22 (0.46) 0.237
## prevPAL (mean (SD)) 0.22 (0.46) 0.37 (0.56) 0.308
## cMaj (%) 0.365
## Biology 412 (40.9) 405 (53.1)
## Chemistry 145 (14.4) 76 (10.0)
## Civil Engineering 28 ( 2.8) 12 ( 1.6)
## Geology 15 ( 1.5) 7 ( 0.9)
## Kinesiology/Physical Education 150 (14.9) 129 (16.9)
## Mechanical Engineering 48 ( 4.8) 14 ( 1.8)
## Nursing 16 ( 1.6) 16 ( 2.1)
## Nutrition 35 ( 3.5) 34 ( 4.5)
## OTHER 90 ( 8.9) 43 ( 5.6)
## Physics 26 ( 2.6) 7 ( 0.9)
## Undeclared 42 ( 4.2) 19 ( 2.5)
## term.units.attemptedCensus (mean (SD)) 13.65 (2.10) 13.90 (1.91) 0.125
## hs.gpa (mean (SD)) 3.42 (0.44) 3.40 (0.42) 0.030
## delay.from.hs (mean (SD)) 2.23 (1.28) 2.16 (1.08) 0.052
## acad.stand = Other (%) 52 ( 5.2) 14 ( 1.8) 0.182
Check how many variables have SMD > 0.1
addmargins(table(ExtractSmd(unmatched.tab) > 0.1))
##
## FALSE TRUE Sum
## 8 11 19
get.imbal.vars(unmatched.tab)
Variable | Before Matching SMD |
---|---|
sat.math.score | 0.3997009 |
cMaj | 0.3649218 |
gender | 0.3140303 |
prevPAL | 0.3077470 |
sat.verbal.score | 0.2842378 |
csus.gpa.start | 0.2374740 |
eth.erss | 0.2125984 |
AP_CALAB.flg | 0.1983562 |
acad.stand | 0.1817409 |
AP_CHEM.flg | 0.1727961 |
term.units.attemptedCensus | 0.1253328 |
11 out of 19 variables have SMD >0.1
Implement a propensity score matching method.
match.chem1a <- with(chem1a.final, Match(
Y=chem1a.final$grd.pt.unt, Tr = chem1a.final$palN, X = p.score,
BiasAdjust = F, estimand = "ATT", M=1, caliper=0.25, replace = TRUE, ties = TRUE))
Standardized mean differences for continuous variables and categorical variables.
# Needed for match table
chem1a.final <- chem1a.final %>%
rownames_to_column(var = "id")
# Matched data
chem1a.matched.dat <- chem1a.final[unlist(match.chem1a[c("index.treated", "index.control")]), ]
chem1a.matched.dat$match.weights<- c(match.chem1a$weights, match.chem1a$weights)
# Add match weights to match data
weighted.dat<-svydesign(id=~1,weights=~match.weights, data = chem1a.matched.dat)
# Variable Summary Table for matched data with match weights
matched.tab <-svyCreateTableOne(vars = chem1a.covs, strata = "palN", data= weighted.dat, smd = TRUE, test = FALSE)
print(matched.tab, smd = TRUE)
## Stratified by palN
## 0 1 SMD
## n 757.00 757.00
## cum.percent.units.passed (mean (SD)) 0.91 (0.09) 0.91 (0.09) 0.053
## eth.erss (%) 0.116
## African American 21.2 ( 2.8) 29.0 ( 3.8)
## Asian 230.7 (30.5) 237.0 (31.3)
## Other 18.1 ( 2.4) 21.0 ( 2.8)
## Hispanic 224.1 (29.6) 238.0 (31.4)
## Two or More Races 37.0 ( 4.9) 40.0 ( 5.3)
## Unknown 14.7 ( 1.9) 15.0 ( 2.0)
## White 211.1 (27.9) 177.0 (23.4)
## gender = Male (%) 226.1 (29.9) 240.0 (31.7) 0.040
## sat.math.score (mean (SD)) 498.04 (73.29) 500.17 (73.25) 0.029
## sat.verbal.score (mean (SD)) 478.74 (75.32) 478.10 (74.42) 0.009
## sat.math.flg = old (%) 697.2 (92.1) 697.0 (92.1) 0.001
## AP_CALAB (mean (SD)) 2.50 (0.49) 2.54 (0.47) 0.069
## AP_CALAB.flg = Not Missing (%) 91.8 (12.1) 94.0 (12.4) 0.009
## AP_BIOL (mean (SD)) 2.52 (0.21) 2.50 (0.30) 0.059
## AP_BIOL.flg = Not Missing (%) 57.4 ( 7.6) 61.0 ( 8.1) 0.018
## AP_CHEM (mean (SD)) 1.96 (0.12) 1.96 (0.13) 0.009
## AP_CHEM.flg = Not Missing (%) 13.8 ( 1.8) 16.0 ( 2.1) 0.021
## csus.gpa.start (mean (SD)) 3.20 (0.47) 3.22 (0.46) 0.039
## prevPAL (mean (SD)) 0.38 (0.61) 0.36 (0.54) 0.041
## cMaj (%) 0.123
## Biology 390.5 (51.6) 401.0 (53.0)
## Chemistry 92.9 (12.3) 76.0 (10.0)
## Civil Engineering 13.0 ( 1.7) 12.0 ( 1.6)
## Geology 9.6 ( 1.3) 7.0 ( 0.9)
## Kinesiology/Physical Education 117.2 (15.5) 128.0 (16.9)
## Mechanical Engineering 11.0 ( 1.5) 14.0 ( 1.8)
## Nursing 13.7 ( 1.8) 16.0 ( 2.1)
## Nutrition 32.0 ( 4.2) 34.0 ( 4.5)
## OTHER 56.3 ( 7.4) 43.0 ( 5.7)
## Physics 5.4 ( 0.7) 7.0 ( 0.9)
## Undeclared 15.2 ( 2.0) 19.0 ( 2.5)
## term.units.attemptedCensus (mean (SD)) 13.96 (1.83) 13.90 (1.91) 0.030
## hs.gpa (mean (SD)) 3.40 (0.42) 3.41 (0.42) 0.013
## delay.from.hs (mean (SD)) 2.20 (1.21) 2.16 (1.08) 0.039
## acad.stand = Other (%) 22.8 ( 3.0) 14.0 ( 1.8) 0.075
Continuous variables: standardized mean differences are computed using the standard deviation of the treated group.
Binary variables: raw differences in proportions are used.
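For reference, these conventions correspond to \(d = (\bar{x}_{PAL} - \bar{x}_{nonPAL})/s_{PAL}\) for a continuous covariate, where \(s_{PAL}\) is the treated-group standard deviation, and to the raw difference in proportions, \(d = \hat{p}_{PAL} - \hat{p}_{nonPAL}\), for a binary covariate.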
All variables are balanced, with standardized mean differences under the 0.1 threshold.
chem1a.bal <- bal.tab(match.chem1a, formula = f.build("palN", chem1a.covs), data = chem1a.final,
distance = ~ p.score, thresholds = c(m = .1), un = TRUE, imbalanced.only = TRUE)
chem1a.bal
## Balance Measures
## All covariates are balanced.
##
## Balance tally for mean differences
## count
## Balanced, <0.1 36
## Not Balanced, >0.1 0
##
## Variable with the greatest mean difference
## Variable Diff.Adj M.Threshold
## AP_CALAB 0.07 Balanced, <0.1
##
## Sample sizes
## Control Treated
## All 1007. 762
## Matched (ESS) 313.69 757
## Matched (Unweighted) 711. 757
## Unmatched 296. 0
## Discarded 0. 5
get.var.perc.tab(chem1a.bal)
## Variable Diff.Un Diff.Adj % Improvement
## 1 p.score 0.813473150 0.0009607886 100
## 2 eth.erss_Unknown -0.018050810 0.0004010191 98
## 3 sat.math.flg_old -0.014191995 -0.0002642008 98
## 4 sat.verbal.score -0.299750402 -0.0086395931 97
## 5 AP_CALAB.flg_Not Missing -0.072271006 0.0028952004 96
## 6 sat.math.score -0.411373506 0.0286721308 93
## 7 AP_CHEM.flg_Not Missing -0.032627252 0.0029502422 91
## 8 cMaj_Biology 0.122360015 0.0138422344 89
## 9 cMaj_Civil Engineering -0.012057331 -0.0013099956 89
## 10 gender_Male -0.151452953 0.0183855444 88
## 11 cMaj_Physics -0.016632913 0.0020695729 88
## 12 cMaj_Mechanical Engineering -0.029293632 0.0039189784 87
## 13 eth.erss_Two or More Races -0.027943503 0.0038969617 86
## 14 prevPAL 0.281028450 -0.0422296608 85
## 15 csus.gpa.start 0.255366543 0.0396354928 84
## 16 AP_BIOL.flg_Not Missing -0.028189810 0.0047225892 83
## 17 AP_CHEM -0.043044496 0.0088420199 79
## 18 term.units.attemptedCensus 0.131689543 -0.0297896263 77
## 19 eth.erss_Hispanic 0.069323137 0.0183478015 74
## 20 cMaj_Nutrition 0.009862719 0.0025869661 74
## 21 cMaj_Undeclared -0.016773660 0.0050166698 70
## 22 acad.stand_Other -0.033265827 -0.0115918098 65
## 23 eth.erss_Asian -0.022640728 0.0082609926 64
## 24 hs.gpa -0.030258269 0.0130619500 57
## 25 eth.erss_Other 0.007698082 0.0037837328 51
## 26 cMaj_Chemistry -0.044254523 -0.0223469837 50
## 27 cMaj_OTHER -0.032943933 -0.0175803611 47
## 28 cMaj_Geology -0.005709378 -0.0033937221 41
## 29 cMaj_Nursing 0.005108597 0.0029942756 41
## 30 cMaj_Kinesiology/Physical Education 0.020334040 0.0142023652 30
## 31 delay.from.hs -0.057063209 -0.0409351417 28
## 32 AP_CALAB -0.062685060 0.0700268159 -12
## 33 AP_BIOL -0.041283872 -0.0516391001 -25
## 34 eth.erss_African American -0.004324062 0.0103478644 -139
## 35 eth.erss_White -0.004062116 -0.0450383720 -1009
## 36 cum.percent.units.passed 0.004657179 0.0537413078 -1054
get.bal.plot(unmatched.tab, matched.tab)
love.plot(chem1a.bal,binary = "raw", stars = "std", var.order = "unadjusted",
thresholds = c(m = .1), abs = F)
create.match.tab(chem1a.matched.dat)
 | Non-PAL | PAL |
---|---|---|
Single Matches | 278 | 337 |
Multiple Matches | 433 | 420 |
Total Students | 711 | 757 |
Out of 762 PAL students, 757 were matched and 5 could not be matched. Of the 757 matched PAL students, 337 were matched to exactly one non-PAL student and 420 were matched to multiple non-PAL students.
The 1561 non-PAL matches involve 711 distinct non-PAL students: 278 of them were matched to exactly one PAL student and 433 were matched to multiple PAL students.
get.att.plot(chem1a.final, match.chem1a)
The standardized mean difference of the prognostic score is 0.0335, which indicates balance. All variables are under the 0.1 mean difference threshold. It is likely that the effect estimate will be relatively unbiased, since the estimated prognostic score is balanced.
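The chunk that produced the prognostic score output below is not shown; a sketch of the usual approach (fit the outcome model on the non-PAL students only, then check covariate balance on its predictions alongside the propensity score) is:
# Fit the outcome (prognostic) model on the control (non-PAL) group only
ctrl.data <- chem1a.final %>% filter(palN == 0)
prog.model <- glm(f.build("grd.pt.unt", chem1a.covs), data = ctrl.data)
summary(prog.model)
# Predict prognostic scores for all students and include them in the balance check
prog.score <- predict(prog.model, newdata = chem1a.final)
bal.tab(match.chem1a, formula = f.build("palN", chem1a.covs), data = chem1a.final,
        distance = ~ prog.score, addl = ~ p.score, thresholds = c(m = .1))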
##
## Call:
## glm(formula = f.build("grd.pt.unt", chem1a.covs), data = ctrl.data)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -2.39006 -0.58465 0.02405 0.57994 2.76737
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -5.2196838 0.5382662 -9.697 < 2e-16 ***
## cum.percent.units.passed 0.2918154 0.3625120 0.805 0.42103
## eth.erssAsian 0.0601629 0.1430710 0.421 0.67421
## eth.erssOther 0.0209315 0.2374072 0.088 0.92976
## eth.erssHispanic -0.0631047 0.1453120 -0.434 0.66419
## eth.erssTwo or More Races -0.0504216 0.1665614 -0.303 0.76217
## eth.erssUnknown 0.2034702 0.1984971 1.025 0.30559
## eth.erssWhite 0.0313683 0.1474357 0.213 0.83156
## genderMale 0.1350774 0.0621716 2.173 0.03005 *
## sat.math.score 0.0024302 0.0005127 4.740 2.45e-06 ***
## sat.verbal.score -0.0008151 0.0004630 -1.760 0.07866 .
## sat.math.flgold 0.1244919 0.1187407 1.048 0.29470
## AP_CALAB 0.0791501 0.0472506 1.675 0.09423 .
## AP_CALAB.flgNot Missing 0.0481646 0.0796500 0.605 0.54552
## AP_BIOL 0.1680120 0.0935846 1.795 0.07292 .
## AP_BIOL.flgNot Missing 0.0269552 0.0953309 0.283 0.77743
## AP_CHEM 0.3201416 0.1258924 2.543 0.01115 *
## AP_CHEM.flgNot Missing 0.3035225 0.1284433 2.363 0.01832 *
## csus.gpa.start 0.9656280 0.0758034 12.739 < 2e-16 ***
## prevPAL -0.0695985 0.0624447 -1.115 0.26531
## cMajChemistry 0.1194315 0.0869728 1.373 0.17000
## cMajCivil Engineering 0.3998032 0.1789340 2.234 0.02569 *
## cMajGeology -0.3463892 0.2371614 -1.461 0.14446
## cMajKinesiology/Physical Education 0.0310270 0.0855213 0.363 0.71683
## cMajMechanical Engineering 0.1324651 0.1417757 0.934 0.35037
## cMajNursing -0.4427774 0.2251455 -1.967 0.04951 *
## cMajNutrition -0.0323645 0.1578557 -0.205 0.83759
## cMajOTHER 0.1995494 0.1050010 1.900 0.05767 .
## cMajPhysics -0.0867119 0.1849268 -0.469 0.63925
## cMajUndeclared -0.1265802 0.1437063 -0.881 0.37863
## term.units.attemptedCensus 0.0079314 0.0141139 0.562 0.57427
## hs.gpa 0.2435173 0.0756390 3.219 0.00133 **
## delay.from.hs 0.1102145 0.0257807 4.275 2.10e-05 ***
## acad.standOther -0.1641247 0.1412638 -1.162 0.24559
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for gaussian family taken to be 0.7558023)
##
## Null deviance: 1209.9 on 1006 degrees of freedom
## Residual deviance: 735.4 on 973 degrees of freedom
## AIC: 2611.2
##
## Number of Fisher Scoring iterations: 2
## Balance Measures
## Type Diff.Adj M.Threshold
## prog.score Distance 0.0335 Balanced, <0.1
## cum.percent.units.passed Contin. 0.0537 Balanced, <0.1
## eth.erss_African American Binary 0.0103 Balanced, <0.1
## eth.erss_Asian Binary 0.0083 Balanced, <0.1
## eth.erss_Other Binary 0.0038 Balanced, <0.1
## eth.erss_Hispanic Binary 0.0183 Balanced, <0.1
## eth.erss_Two or More Races Binary 0.0039 Balanced, <0.1
## eth.erss_Unknown Binary 0.0004 Balanced, <0.1
## eth.erss_White Binary -0.0450 Balanced, <0.1
## gender_Male Binary 0.0184 Balanced, <0.1
## sat.math.score Contin. 0.0287 Balanced, <0.1
## sat.verbal.score Contin. -0.0086 Balanced, <0.1
## sat.math.flg_old Binary -0.0003 Balanced, <0.1
## AP_CALAB Contin. 0.0700 Balanced, <0.1
## AP_CALAB.flg_Not Missing Binary 0.0029 Balanced, <0.1
## AP_BIOL Contin. -0.0516 Balanced, <0.1
## AP_BIOL.flg_Not Missing Binary 0.0047 Balanced, <0.1
## AP_CHEM Contin. 0.0088 Balanced, <0.1
## AP_CHEM.flg_Not Missing Binary 0.0030 Balanced, <0.1
## csus.gpa.start Contin. 0.0396 Balanced, <0.1
## prevPAL Contin. -0.0422 Balanced, <0.1
## cMaj_Biology Binary 0.0138 Balanced, <0.1
## cMaj_Chemistry Binary -0.0223 Balanced, <0.1
## cMaj_Civil Engineering Binary -0.0013 Balanced, <0.1
## cMaj_Geology Binary -0.0034 Balanced, <0.1
## cMaj_Kinesiology/Physical Education Binary 0.0142 Balanced, <0.1
## cMaj_Mechanical Engineering Binary 0.0039 Balanced, <0.1
## cMaj_Nursing Binary 0.0030 Balanced, <0.1
## cMaj_Nutrition Binary 0.0026 Balanced, <0.1
## cMaj_OTHER Binary -0.0176 Balanced, <0.1
## cMaj_Physics Binary 0.0021 Balanced, <0.1
## cMaj_Undeclared Binary 0.0050 Balanced, <0.1
## term.units.attemptedCensus Contin. -0.0298 Balanced, <0.1
## hs.gpa Contin. 0.0131 Balanced, <0.1
## delay.from.hs Contin. -0.0409 Balanced, <0.1
## acad.stand_Other Binary -0.0116 Balanced, <0.1
## p.score Contin. 0.0010 Balanced, <0.1
##
## Balance tally for mean differences
## count
## Balanced, <0.1 37
## Not Balanced, >0.1 0
##
## Variable with the greatest mean difference
## Variable Diff.Adj M.Threshold
## AP_CALAB 0.07 Balanced, <0.1
##
## Sample sizes
## Control Treated
## All 1007. 762
## Matched (ESS) 313.69 757
## Matched (Unweighted) 711. 757
## Unmatched 296. 0
## Discarded 0. 5
The estimated increase in the mean grade of students in PAL over those not in PAL, after correcting for self-selection biases, is 0.4952651. This result is statistically significant with a P-value of \(4.8561\times 10^{-13}\) and is based on 757 PAL students and 1561 non-PAL student matches (711 distinct non-PAL students). Note that this P-value is for a two-tailed test; it will be corrected to a one-tailed test (which halves the P-value) in the final table output summarizing the effect of PAL across chemistry courses.
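As a quick check of that correction (halving the two-tailed P-value reported by summary() below):
# Two-tailed p-value from the matching output, halved for the one-tailed test
4.8561e-13 / 2   # approximately 2.43e-13, the value shown in the final summary table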
summary(match.chem1a)
##
## Estimate... 0.49527
## AI SE...... 0.068509
## T-stat..... 7.2292
## p.val...... 4.8561e-13
##
## Original number of observations.............. 1769
## Original number of treated obs............... 762
## Matched number of observations............... 757
## Matched number of observations (unweighted). 1561
##
## Caliper (SDs)........................................ 0.25
## Number of obs dropped by 'exact' or 'caliper' 5
psens(match.chem1a, Gamma=2.0, GammaInc = 0.1)
##
## Rosenbaum Sensitivity Test for Wilcoxon Signed Rank P-Value
##
## Unconfounded estimate .... 0
##
## Gamma Lower bound Upper bound
## 1.0 0 0.0000
## 1.1 0 0.0000
## 1.2 0 0.0000
## 1.3 0 0.0000
## 1.4 0 0.0000
## 1.5 0 0.0000
## 1.6 0 0.0006
## 1.7 0 0.0106
## 1.8 0 0.0764
## 1.9 0 0.2732
## 2.0 0 0.5716
##
## Note: Gamma is Odds of Differential Assignment To
## Treatment Due to Unobserved Factors
##
Note that in the above table, \(\Gamma=1.8\) in the first column is the first row where 0.05 falls between the lower and upper bounds. This means that an unknown confounder which increases the odds of being in PAL by more than 1.8 is enough to change the treatment effect from significant to non-significant. The next code block generates the effect of each variable in the propensity score model on the odds of enrolling in PAL. If there is an unknown confounder that has an effect on the propensity score similar to “cMaj” or “csus.gpa.start”, the PAL effect would become non-significant. Thus, this finding is sensitive to unknown confounders. It is possible that a variable like the number of hours per week a student works, which is not in our dataset, is a confounder that could reverse the statistical significance of this analysis.
kable(sort(exp(abs(chem1a.first.order.prop.model$coefficients))))
 | x |
---|---|
sat.verbal.score | 1.002461 |
sat.math.score | 1.003143 |
cMajNursing | 1.006695 |
AP_BIOL | 1.011381 |
cum.percent.units.passed | 1.018726 |
cMajNutrition | 1.029318 |
AP_CALAB | 1.030082 |
term.units.attemptedCensus | 1.073628 |
cMajGeology | 1.103792 |
delay.from.hs | 1.122394 |
cMajKinesiology/Physical Education | 1.134369 |
sat.math.flgold | 1.137279 |
eth.erssTwo or More Races | 1.138576 |
eth.erssAsian | 1.222257 |
eth.erssUnknown | 1.340370 |
hs.gpa | 1.345820 |
AP_BIOL.flgNot Missing | 1.387255 |
genderMale | 1.396312 |
eth.erssWhite | 1.406862 |
AP_CALAB.flgNot Missing | 1.411832 |
eth.erssOther | 1.521413 |
cMajUndeclared | 1.588511 |
eth.erssHispanic | 1.612834 |
acad.standOther | 1.622814 |
prevPAL | 1.702267 |
cMajCivil Engineering | 1.817850 |
cMajOTHER | 1.839501 |
cMajChemistry | 2.028817 |
csus.gpa.start | 2.052126 |
AP_CHEM | 2.103142 |
cMajPhysics | 2.222943 |
cMajMechanical Engineering | 2.341708 |
AP_CHEM.flgNot Missing | 3.122362 |
(Intercept) | 9.085387 |
Course | Non-PAL | PAL | Diff. | Std. error | p-val | Sensitivity | N(non-PAL) | N(PAL) |
---|---|---|---|---|---|---|---|---|
CHEM 1A | 1.48 | 1.98 | 0.5 | 0.07 | 2.43e-13 | 1.8 | 711 | 757 |
Greifer, Noah. 2020. Cobalt: Covariate Balance Tables and Plots. https://CRAN.R-project.org/package=cobalt.
Leite, W. L. 2017. Practical Propensity Score Methods Using R. Thousand Oaks, CA: Sage Publishing. https://osf.io/nygb5/.
Rosenbaum, Paul R. 2002. Observational Studies. 2nd ed. New York: Springer.
Sekhon, Jasjeet S. 2011. “Multivariate and Propensity Score Matching Software with Automated Balance Optimization: The Matching Package for R.” Journal of Statistical Software 42 (7): 1–52. http://www.jstatsoft.org/v42/i07/.
Yoshida, Kazuki, and Alexander Bartel. 2020. Tableone: Create ’Table 1’ to Describe Baseline Characteristics with or Without Propensity Score Weights. https://CRAN.R-project.org/package=tableone.
Zhang, Z., H. J. Kim, G. Lonjon, Y. Zhu, and written on behalf of AME Big-Data Clinical Trial Collaborative Group. 2019. “Balance Diagnostics After Propensity Score Matching.” Annals of Translational Medicine 7 (1): 16. https://doi.org/10.21037/atm.2018.12.10.