First Add Some R Packages to the Workspace.
Caution: warning messages are suppressed to reduce clutter in the output.
tidyverse: Importing data, cleaning data, data manipulation, & data visualization
kableExtra: Build HTML tables
DataExplorer: Exploratory Data Analysis & Feature Engineering
tableone: Standardized mean differences for before and after matching
survey: Matched data with match weights
Matching: Propensity score matching
cobalt: Covariate balance
reshape2: Covariate balance plot
rbounds: Rosenbaum Sensitivity test
# Attach analysis packages (purposes described above).
library(tidyverse)
library(kableExtra)
library(DataExplorer)
library(tableone)
library(survey)
library(Matching)
library(cobalt)
library(reshape2)
library(rbounds)
# Pin select to dplyr's version because a later-attached package masks it.
select <- dplyr::select # Resolves package conflicts with select
options(width = 120) # Format print width
General functions used throughout the analysis.
# Update palN for Chem 24 Spring 2019 ------------------------------------------
update.chem24s19 <- function(chem.dat) {
  # Fix mislabeled PAL status for Chem 24 Spring 2019.
  #
  # Recode palN for that course/term from the PAL course file:
  #   2 = completed PAL (pal.grade == "CR")
  #   0 = non-PAL (pal.grade is NA)
  #   1 = enrolled in PAL but did not complete it
  # All other rows keep their existing palN.
  #
  # chem.dat: chemistry course data frame with emplid, course, term, palN.
  # Returns chem.dat with palN corrected for Chem 24 Spring 2019.
  PAL.course.data <- read_rds("palCourseData.rds")
  chem24.S19 <- PAL.course.data %>%
    filter(term == "Spring 2019", course == "CHEM 24")
  # Add a palN indicator for Chem 24 Spring 2019
  chem24.S19 <- chem24.S19 %>%
    mutate(palN.chem24.S19 = case_when(
      pal.grade == "CR" ~ 2,
      is.na(pal.grade) ~ 0,
      TRUE ~ 1
    )) %>%
    select(emplid, palN.chem24.S19)
  # Distribution observed when this was developed:
  #   0 = 51 (non-PAL), 1 = 10 (incomplete PAL), 2 = 52 (PAL)
  chem.dat <- left_join(chem.dat, chem24.S19, by = "emplid")
  chem.dat <- chem.dat %>%
    mutate(palN = case_when(
      course == "CHEM 24" & term == "Spring 2019" ~ palN.chem24.S19,
      TRUE ~ palN
    )) %>%
    select(-palN.chem24.S19)
  return(chem.dat)
}
# Get raw table of mean gpa for PAL and non-PAL -------------------------------
get.raw.tab <- function(classes, df) {
  # Build a formatted table comparing mean course grade for non-PAL vs
  # completed-PAL students (first attempts only), restricted to terms at or
  # after the first term PAL was offered for each course.
  #
  # classes: character vector of course names (e.g. "CHEM 1A").
  # df:      course data with course, course.seq, term.code, palN, grd.pt.unt.
  # Returns a kable/kableExtra HTML table.
  raw.table <- data.frame(class = character(),
                          nonPALavg = numeric(),
                          PALavg = numeric(),
                          Diff = numeric(),
                          NonPAL_Num = integer(),
                          PAL_Num = integer(),
                          CompletePAL = numeric(),
                          TermPALStart = integer(),
                          row.names = NULL,
                          stringsAsFactors = FALSE)
  for (i in seq_along(classes)) {
    curr.class <- classes[i]
    # First attempts only (course.seq == 0)
    temp <- subset(df, course == curr.class & course.seq == 0)
    # Earliest term with a completed-PAL (palN == 2) student.
    # NOTE(review): if a course has no palN == 2 rows this is Inf with a
    # warning and the filter below drops everything -- assumed not to occur
    # for the courses analyzed here.
    pal.start <- min(unique(temp$term.code[temp$palN == 2]))
    # Only include terms after PAL start term
    temp <- subset(temp, term.code >= pal.start)
    # Mean grade by palN level; the x[1]/x[3] indexing assumes all three
    # levels 0 (non-PAL), 1 (incomplete PAL), 2 (completed PAL) are present.
    x <- tapply(temp$grd.pt.unt, temp$palN, mean, na.rm = TRUE) %>%
      as.numeric %>%
      round(2)
    y <- table(temp$palN) %>% as.numeric
    raw.table[i, 'class'] <- curr.class
    raw.table[i, c(2:4, 7)] <- c(x[1], x[3], x[3] - x[1],
                                 round(y[3] / sum(y), 2))
    raw.table[i, c(5, 6, 8)] <- c(y[1], y[3], pal.start)
  }
  # Formatted table
  raw.table <- kable(
    raw.table,
    caption = "Raw Comparison of PAL and non-PAL Grades (No Propensity Adjustment)"
  ) %>%
    kable_styling(full_width = TRUE, position = "left")
  return(raw.table)
}
# Data cleaning ----------------------------------------------------------------
clean.data <- function(df) {
  # Clean and augment the course data set:
  #  * delay.from.hs: years between HS graduation and taking the course
  #  * drop incomplete-PAL students (palN == 1) and recode palN to 0/1
  #  * tidy remedial-status labels (embedded newlines) and factor them
  #  * cum.percent.units.passed: share of attempted units passed, with
  #    zero-units-taken students imputed at the mean
  #  * anonymize instructors as integer labels
  #  * add cMaj (census major without concentration), county, adm.area,
  #    and sac.county.flg
  # Returns the cleaned data frame.

  # Replaced coh.term with coh.term.course
  yr.course.taken <- as.numeric(gsub(".*([0-9]{4})", "\\1", df$coh.term.course))
  df$delay.from.hs <- ifelse(!is.na(yr.course.taken) & !is.na(df$hs.grad.yr),
                             yr.course.taken - df$hs.grad.yr, NA)
  # Remove students who did not complete PAL, then recode palN:
  # 2 (completed PAL) -> 1, 0 (non-PAL) stays 0
  df <- subset(df, palN != 1)
  df$palN <- ifelse(df$palN == 2, 1, 0)
  # Clean up category names in m.rmd and e.rmd
  df$m.rmd[df$m.rmd == "Not Remedial\nin Math"] <- "Not Remedial in Math"
  df$m.rmd[df$m.rmd == "Remedial\nin Math"] <- "Remedial in Math"
  df$e.rmd[df$e.rmd == "Not Remedial\nin English"] <- "Not Remedial in English"
  df$e.rmd[df$e.rmd == "Remedial\nin English"] <- "Remedial in English"
  df <- df %>% mutate(m.rmd = factor(m.rmd), e.rmd = factor(e.rmd))
  # Proportion of cumulative units taken that were passed.  Students who
  # have taken 0 units are first flagged with -1 (to distinguish them from
  # students who passed 0 of the units they took); the -1 is then replaced
  # with the mean of cum.percent.units.passed.
  df <- df %>%
    mutate(cum.percent.units.passed =
             ifelse(tot.taken.prgrss.start == 0, -1,
                    tot.passd.prgrss.start / tot.taken.prgrss.start)) %>%
    mutate(cum.percent.units.passed =
             ifelse(cum.percent.units.passed == -1,
                    mean(cum.percent.units.passed, na.rm = TRUE),
                    cum.percent.units.passed))
  # Code instructor as an anonymous integer label
  df$Instructor_01 <- droplevels(factor(df$Instructor_01))
  instructor.vec <- sort(unique(df$Instructor_01))
  num.instr <- length(instructor.vec)
  df$Instructor_01 <- factor(
    df$Instructor_01, levels = instructor.vec,
    labels = as.character(1:num.instr)
  )
  # Add "cMaj", census majors without concentrations/specializations/tracks
  major_lookup <- read.csv("Census Major Lookup.csv", header = TRUE,
                           stringsAsFactors = FALSE)
  df <- merge(df, major_lookup %>% select(censusMajor, cMaj),
              by = "censusMajor", all.x = TRUE)
  # Recode mother's and father's education to three levels plus "Unknown"
  non.hs.grad <- c("No High School", "Some High School")
  hs.grad <- c("High School Graduate", "Some College",
               "2-Year College Graduate")
  coll.grad <- c("4-Year College Graduate", "Postgraduate")
  parent.ed.levels <- c(
    "Non-HS Graduate", "HS Graduate", "College Graduate", "Unknown"
  )
  df <- df %>%
    mutate(
      mother.ed = ifelse(mother.ed %in% non.hs.grad, "Non-HS Graduate",
                  ifelse(mother.ed %in% hs.grad, "HS Graduate",
                  ifelse(mother.ed %in% coll.grad, "College Graduate",
                         "Unknown"))),
      mother.ed = factor(mother.ed, levels = parent.ed.levels),
      father.ed = ifelse(father.ed %in% non.hs.grad, "Non-HS Graduate",
                  ifelse(father.ed %in% hs.grad, "HS Graduate",
                  ifelse(father.ed %in% coll.grad, "College Graduate",
                         "Unknown"))),
      father.ed = factor(father.ed, levels = parent.ed.levels))
  # Recode adm.area with these counties as local: 'El Dorado', 'Nevada',
  # 'Placer', 'Sacramento', 'San Joaquin', 'Solano', 'Yolo'.
  counties.rad <- read_csv(
    "countiesRadius120mi.csv",
    col_types = cols(
      state = col_skip(), city = col_skip(), distance.km = col_skip()
    )
  )
  df <- left_join(df, counties.rad, by = "zip")
  local.adm.counties <- c(
    'El Dorado', 'Nevada', 'Placer', 'Sacramento', 'San Joaquin', 'Solano',
    'Yolo'
  )
  # County will be NA if the zip code is not within a 120-mile radius of
  # the CSUS zip code (95819); such students are treated as nonlocal.
  df <- df %>%
    mutate(
      adm.area =
        if_else(!(county %in% local.adm.counties) | is.na(county),
                "nonlocal", "local")
    ) %>%
    mutate(sac.county.flg =
             if_else(!(county == "Sacramento") | is.na(county), 0, 1)
    ) %>%
    mutate(sac.county.flg = factor(sac.county.flg))
  return(df)
}
# Extract prerequisite course grade ---------------------------------------------
get.prereq.grades <- function(course.df, df, prereq) {
  # Attach each student's most recent attempt at the prerequisite course
  # (grade points and letter grade) to course.df.  Students with no record
  # of the prerequisite are dropped from the returned data frame.
  #
  # course.df: data for the course being analyzed (must contain emplid).
  # df:        full course history (emplid, course, course.seq, grades).
  # prereq:    prerequisite course name, e.g. "CHEM 1B".
  course.stu <- course.df$emplid
  prereq.df <- df %>%
    select(emplid, course, course.seq, grd.pt.unt, grade) %>%
    filter(emplid %in% course.stu, course == prereq) %>%
    group_by(emplid) %>%
    # Keep the most recent attempt.  NOTE(review): if a student has two
    # rows at the same max course.seq the join below fans out -- assumed
    # unique per student here.
    filter(course.seq == max(course.seq)) %>%
    rename(
      prereq.course.seq = course.seq, prereq.grd.pt.unt = grd.pt.unt,
      prereq.grade = grade
    ) %>%
    select(-course)
  prereq.stu <- prereq.df$emplid
  course.df <- course.df %>%
    filter(emplid %in% prereq.stu)
  course.df <- left_join(course.df, prereq.df, by = "emplid")
  return(course.df)
}
# Get only the variables that have missing values ---------------------------------------------
get.missing.only <- function(course.df) {
  # Return course.df restricted to the columns that contain at least one NA.
  na.counts <- course.df %>%
    summarise(across(everything(), ~ sum(is.na(.x)))) %>%
    # pivot_longer() replaces the superseded gather(); keep the key/value
    # column names the downstream code expects.
    pivot_longer(everything(), names_to = "key", values_to = "value") %>%
    filter(value != 0)
  course.df %>%
    dplyr::select(all_of(na.counts$key))
}
# Get imbalanced variables with SMD > 0.1------------------------------------
get.imbal.vars <- function(tab) {
  # List covariates whose pre-matching standardized mean difference exceeds
  # the conventional 0.1 imbalance threshold, largest first, as a kable.
  smd.df <- as.data.frame(ExtractSmd(tab)) %>%
    rownames_to_column(var = "Variable") %>%
    rename(`Before Matching SMD` = `1 vs 2`) %>%
    filter(`Before Matching SMD` > 0.1) %>%
    arrange(desc(`Before Matching SMD`))
  kable(smd.df, caption = "Variables with SMD > 0.1") %>%
    kable_styling(full_width = FALSE)
}
# Unadjusted means -------------------------------------------------------------
get.unadj.means <- function(df.final) {
  # Raw (unweighted) mean course grade for the non-PAL and PAL groups,
  # plus their difference, formatted as a one-row kable.
  means.wide <- df.final %>%
    group_by(palN) %>%
    summarise(unadj.means = mean(grd.pt.unt)) %>%
    pivot_wider(names_from = "palN", values_from = "unadj.means") %>%
    rename(`Non-PAL` = `0`, `PAL` = `1`) %>%
    mutate(Diff. = `PAL` - `Non-PAL`)
  kable(means.wide, caption = "Unadjusted Mean Grades") %>%
    kable_styling(full_width = FALSE)
}
# Adjusted means --------------------------------------------------------------
# Weighted mean course grade by PAL status after matching, as a kable.
# match.list: result of Matching::Match() (supplies $weights).
# matched.dat: matched rows of the analysis data.
# NOTE(review): assumes matched.dat is stacked [treated rows; control rows]
# so each palN group has exactly one row per element of match.list$weights;
# confirm against the caller that builds matched.dat.
adj.means <- function(match.list, matched.dat) {
get.adj.means <- matched.dat %>%
group_by(palN) %>%
summarise(adj.means = weighted.mean(grd.pt.unt, match.list$weights)) %>%
pivot_wider(names_from = "palN", values_from = "adj.means") %>%
rename(`Non-PAL`= `0`, `PAL`= `1`) %>%
mutate(Diff. = `PAL`-`Non-PAL`)
# formatted table
get.adj.means<- kable(get.adj.means, caption = "Adjusted Mean Grades") %>%
kable_styling(full_width= F)
return(get.adj.means)
}
# Match Table ------------------------------------------------------------------
# Count how many PAL and non-PAL students were matched once vs multiple
# times, plus totals, as a kable.  matched.dat must contain palN and an id
# column identifying students (repeated ids = multiple matches).
create.match.tab <- function(matched.dat) {
matched.dat <- matched.dat %>%
mutate(pal = if_else(palN == 0, "Non-PAL", "PAL"))
pal.flg <- c('Non-PAL', 'PAL')
# First pass (Non-PAL) creates match.tab; second pass (PAL) only fills the
# PAL column -- the loop relies on pal.flg being in this exact order.
for (i in seq_along(pal.flg)) {
# Students appearing more than once among the matches
multiple.matches <- matched.dat %>%
filter(pal ==pal.flg[i]) %>%
count(id) %>%
filter(n> 1) %>%
summarise(n())
# Students appearing exactly once
single.matches <- matched.dat %>%
filter(pal == pal.flg[i]) %>%
count(id) %>%
filter(n==1) %>%
summarise(n())
if(pal.flg[i] == 'Non-PAL') {
match.tab <- bind_rows(single.matches, multiple.matches)
match.tab <- match.tab %>%
rename('Non-PAL'= 'n()')
}
# On the Non-PAL pass this harmlessly writes the Non-PAL counts into the
# PAL column; the PAL pass then overwrites it with the PAL counts.
pal.matches <- bind_rows(single.matches, multiple.matches)
match.tab$PAL <- pal.matches$`n()`
row.names(match.tab) <- c("Single Matches", "Multiple Matches")
}
# Append column totals
match.tab <-rbind(
match.tab, "Total Students" = c(sum(match.tab$`Non-PAL`), sum(match.tab$`PAL`))
)
match.tab <- kable(match.tab, caption = "PAL and Non-PAL Matches") %>%
kable_styling(full_width= F)
return(match.tab)
}
# ATT plot ---------------------------------------------------------------------
# https://livefreeordichotomize.com/2019/01/17/understanding-propensity-score-weighting/
# https://www.csus.edu/brand/colors.html
# Mirrored histogram of propensity scores: PAL above the axis, non-PAL
# below (negative counts), with the after-matching weighted distributions
# overlaid in Sac State colors (#043927 green, #c4b581 gold).
# NOTE(review): reads `p.score` from the calling/global environment rather
# than taking it as an argument -- confirm p.score is assigned before this
# is called.
# NOTE(review): `..count..` is deprecated in newer ggplot2 in favor of
# after_stat(count); left unchanged here.
get.att.plot <- function(df.final, match.list)
{
df.final$p.score <- p.score
# Re-key rows by row name so match.list's row indices line up
df.final <- df.final %>%
select(-id) %>%
rownames_to_column(var = "id")
# One propensity-score column per group (b.pal.0 / b.pal.1), before matching
ps.dat <- df.final %>%
select(id, palN, p.score) %>%
pivot_wider(
names_from = "palN", values_from = "p.score", names_prefix = "b.pal."
)
before.match <- ps.dat %>%
select(b.pal.0, b.pal.1)
# Stack treated then control rows; weights repeat once per block
matched.dat <- df.final[unlist(match.list[c("index.treated", "index.control")]), ]
matched.dat$match.weights<- c(match.list$weights, match.list$weights)
after.match <-matched.dat %>%
select(-id) %>%
rownames_to_column(var = "id")
after.match <- after.match %>%
pivot_wider(names_from = "palN", values_from = "p.score", names_prefix = "pal.")
after.match <- after.match %>%
select(pal.0, pal.1, match.weights)
# PAL histograms point up, non-PAL down (y = -count); matched histograms
# are weighted by the match weights
get.att.plot <- ggplot() +
geom_histogram(data = before.match, bins = 50, aes(b.pal.1), alpha = 0.5) +
geom_histogram(data = after.match,bins = 50, aes(pal.1, weight = match.weights),
fill = "#043927", alpha = 0.5) +
geom_histogram(data = before.match, bins = 50, alpha = 0.5,
aes(x = b.pal.0, y = -..count..)) +
geom_histogram(data = after.match, bins = 50,
aes(x = pal.0, weight = match.weights, y = -..count..),
fill = "#c4b581", alpha = 0.5) +
ylab("Count") + xlab("Propensity Scores") +
geom_hline(yintercept = 0, lwd = 0.5) +
# Show magnitudes on the (mirrored) y axis
scale_y_continuous(label = abs)
return(get.att.plot)
}
# Variable Percent Improvement -------------------------------------------------
get.var.perc.tab <- function(list.bal) {
  # Per-covariate balance improvement: percent reduction in absolute
  # standardized difference from before (Diff.Un) to after (Diff.Adj)
  # matching, sorted best-first.  list.bal is a cobalt bal.tab() result.
  bal.df <- list.bal %>%
    pluck("Balance") %>%
    rownames_to_column("Variable") %>%
    dplyr::select("Variable", "Type", "Diff.Un", "Diff.Adj")
  bal.df <- bal.df %>%
    mutate(
      `% Improvement` = if_else(
        Diff.Un == 0, 0,
        round(((abs(Diff.Un) - abs(Diff.Adj)) / abs(Diff.Un)) * 100, 0)
      )
    ) %>%
    arrange(desc(`% Improvement`)) %>%
    dplyr::select("Variable", "Diff.Un", "Diff.Adj", `% Improvement`)
  return(bal.df)
}
# Covariate Balance Plots -------------------------------------------------------
# https://cran.r-project.org/web/packages/tableone/vignettes/smd.html
# https://www.csus.edu/brand/colors.html
get.bal.plot <- function(unmatched.tab, matched.tab) {
  # Love plot of standardized mean differences before vs after matching,
  # in Sac State colors, with variables ordered by unmatched SMD and the
  # conventional 0.1 threshold marked.
  smd.wide <- data.frame(
    variable  = rownames(ExtractSmd(unmatched.tab)),
    Unmatched = as.numeric(ExtractSmd(unmatched.tab)),
    Matched   = as.numeric(ExtractSmd(matched.tab))
  )
  # Long format for ggplot2
  smd.long <- melt(
    data = smd.wide, id.vars = c("variable"),
    variable.name = "Method", value.name = "SMD"
  )
  # Order factor levels by magnitude of the unmatched SMD
  ordered.vars <- as.character(smd.wide$variable)[order(smd.wide$Unmatched)]
  smd.long$variable <- factor(smd.long$variable, levels = ordered.vars)
  ggplot(
    data = smd.long,
    mapping = aes(x = variable, y = SMD, group = Method, color = Method)
  ) +
    scale_color_manual(values = c("#043927", "#c4b581")) +
    geom_line(aes(linetype = Method)) +
    geom_point() +
    scale_linetype_manual(values = c("dashed", "solid")) +
    geom_hline(yintercept = 0.1, color = "black", size = 0.1) +
    coord_flip() +
    theme_bw() +
    theme(legend.key = element_blank())
}
# PAL Effect -------------------------------------------------------------------
# One-row summary of the estimated PAL effect for a course: adjusted group
# means, their difference, standard error, one-sided p-value, Rosenbaum
# sensitivity Gamma, and matched sample sizes.
# match.list: Matching::Match() result; matched.dat: the matched rows;
# course: course label for the output row.
get.pal.effect <- function(match.list, matched.dat, course) {
# Smallest Gamma (hidden-bias factor, scanned up to 2.0 in steps of 0.1)
# whose psens significance bounds straddle 0.05
get.gamma <- psens(match.list, Gamma=2.0, GammaInc = 0.1)[["bounds"]] %>%
filter(`Lower bound` < 0.05 & 0.05 < `Upper bound`) %>%
slice_min(Gamma) %>%
select(Gamma)
# Weighted group means; NOTE(review): assumes matched.dat is stacked so
# each palN group lines up with match.list$weights -- confirm caller.
get.pal.effect <- matched.dat %>%
group_by(palN) %>%
summarise(adj.means = weighted.mean(grd.pt.unt, match.list$weights)) %>%
pivot_wider(names_from = "palN", values_from = "adj.means") %>%
rename(`Non-PAL`= `0`, `PAL`= `1`) %>%
mutate(Course= course, .before= "Non-PAL") %>%
mutate(Diff. = `PAL`-`Non-PAL`) %>%
mutate(`Std. error`= match.list$se, .after= "Diff.") %>%
mutate(
# One-sided normal p-value for Diff. > 0
`p-val`= formatC( 1-pnorm(Diff./`Std. error`), format = "e", digits = 2),
Sensitivity= get.gamma$Gamma,
`N(non-PAL)`= length(unique(match.list$index.control)),
`N(PAL)`= match.list$wnobs
)
return(get.pal.effect)
}
Specialized functions for each course.
## BIO 22 ====================================================================
## Filter to relevant variables
bio22.step.vars <- function(course.df) {
  # Keep only the covariates used for BIO 22 propensity-score modeling
  # (plus palN and the grd.pt.unt outcome).  Columns absent from course.df
  # are silently skipped.
  # FIX: previously ignored its course.df argument and read the global
  # bio22.dat; now operates on the argument.
  vars.to.keep <- c(
    'acad.stand', 'adm.area', 'bot.level','cMaj', 'coh', 'course.age', 'course.count',
    'csus.gpa.start', 'cum.percent.units.passed', 'delay.from.hs', 'e.rmd',
    'eth.erss', 'father.ed', 'fys.flg','gender', 'hous.coh.term.flg', 'hs.gpa',
    'Instructor_01', 'median.income','m.rmd', 'mother.ed', 'pct.female.head',
    'pell.coh.term.flg', 'prevPAL', 'prevPASS', 'reason', 'sac.county.flg',
    'term.units.attemptedCensus', 'palN', 'grd.pt.unt', 'sat.math.score',
    'sat.math.flg', 'sat.verbal.score', 'sat.verbal.flg', 'AP_BIOL', 'AP_CALAB',
    'AP_CALBC', 'AP_CHEM','AP_BIOL.flg', 'AP_CALAB.flg', 'AP_CALBC.flg',
    'AP_CHEM.flg', 'pct.female.head.flg', 'med.inc.flg'
  )
  new.vars <- intersect(vars.to.keep, names(course.df))
  return(course.df[, new.vars])
}
## Build a Logistic Regression Model for Propensity Score
## Fit Propensity Score model (linear terms only)
bio22.step <- function(final.df) {
  # Forward stepwise (AIC, k = 2) propensity model for BIO 22: starts from
  # a base model of prior-achievement covariates (AP_CALAB was selected in
  # earlier runs) and may add any variable except the outcome grd.pt.unt.
  # Returns the refitted glm for the selected formula.
  # FIX: previously ignored its final.df argument and read the global
  # bio22.final; also dropped dead summary()/$anova calls whose values
  # were discarded.
  min.model <- glm(
    palN ~ cum.percent.units.passed + eth.erss + gender + sat.math.score +
      sat.verbal.score + sat.math.flg + AP_CALAB + AP_CALAB.flg,
    data = final.df, family = binomial
  )
  biggest <- formula(
    glm(palN ~ . - grd.pt.unt, data = final.df, family = binomial)
  )
  step.first.order <- step(
    min.model, direction = "forward", scope = biggest, trace = FALSE, k = 2
  )
  # Refit so the returned model's call references the selected formula
  model.first.order <- formula(step.first.order)
  glm(model.first.order, data = final.df, family = binomial)
}
## CHEM 1A ====================================================================
## Filter to relevant variables
chem1a.step.vars <- function(course.df) {
  # Keep only the covariates used for CHEM 1A propensity-score modeling
  # (plus palN and the grd.pt.unt outcome).  Columns absent from course.df
  # are silently skipped.
  # FIX: previously ignored its course.df argument and read the global
  # chem1a.dat; now operates on the argument.
  vars.to.keep <- c(
    'acad.stand', 'adm.area', 'bot.level','cMaj', 'coh', 'course.age',
    'csus.gpa.start', 'cum.percent.units.passed', 'delay.from.hs', 'e.rmd',
    'eth.erss', 'father.ed','fys.flg','gender', 'hous.coh.term.flg', 'hs.gpa',
    'Instructor_01', 'median.income','m.rmd', 'mother.ed', 'pct.female.head',
    'pell.coh.term.flg', 'prevPAL', 'prevPASS', 'reason', 'sac.county.flg',
    'term.units.attemptedCensus','palN', 'grd.pt.unt', 'sat.math.score',
    'sat.math.flg', 'sat.verbal.score', 'sat.verbal.flg', 'AP_BIOL', 'AP_CALAB',
    'AP_CALBC', 'AP_CHEM','AP_BIOL.flg', 'AP_CALAB.flg', 'AP_CALBC.flg',
    'AP_CHEM.flg', 'pct.female.head.flg', 'med.inc.flg'
  )
  new.vars <- intersect(vars.to.keep, names(course.df))
  return(course.df[, new.vars])
}
## Build a Logistic Regression Model for Propensity Score
## Fit Propensity Score model (linear terms only)
chem1a.step <- function(final.df) {
  # Forward stepwise (AIC, k = 2) propensity model for CHEM 1A: starts from
  # a base model of prior-achievement covariates (earlier runs selected
  # AP_CALAB.flg, AP_BIOL.flg, AP_CHEM, and AP_CHEM.flg) and may add any
  # variable except the outcome grd.pt.unt.  Returns the refitted glm.
  # FIX: previously ignored its final.df argument and read the global
  # chem1a.final; also dropped dead summary()/$anova calls.
  min.model <- glm(
    palN ~ cum.percent.units.passed + eth.erss + gender + sat.math.score +
      sat.verbal.score + sat.math.flg + AP_CALAB + AP_CALAB.flg + AP_BIOL +
      AP_BIOL.flg + AP_CHEM + AP_CHEM.flg, data = final.df, family = binomial
  )
  biggest <- formula(
    glm(palN ~ . - grd.pt.unt, data = final.df, family = binomial)
  )
  step.first.order <- step(
    min.model, direction = "forward", scope = biggest, trace = FALSE, k = 2
  )
  model.first.order <- formula(step.first.order)
  glm(model.first.order, data = final.df, family = binomial)
}
## CHEM 1B ====================================================================
## Filter to relevant variables
chem1b.step.vars <- function(course.df) {
  # Keep only the covariates used for CHEM 1B propensity-score modeling,
  # including the CHEM 1A prerequisite grade (chem1a.grd.pt.unt), plus palN
  # and the grd.pt.unt outcome.  Columns absent are silently skipped.
  # FIX: previously ignored its course.df argument and read the global
  # chem1b.dat; now operates on the argument.
  vars.to.keep <- c(
    'acad.stand', 'adm.area', 'bot.level','cMaj', 'coh', 'course.age',
    'csus.gpa.start', 'cum.percent.units.passed', 'delay.from.hs', 'e.rmd',
    'eth.erss', 'father.ed', 'fys.flg','gender', 'hous.coh.term.flg', 'hs.gpa',
    'Instructor_01','median.income','m.rmd', 'mother.ed', 'pct.female.head',
    'pell.coh.term.flg', 'prevPAL', 'prevPASS', 'reason', 'sac.county.flg',
    'term.units.attemptedCensus', 'palN', 'grd.pt.unt', 'chem1a.grd.pt.unt',
    'AP_BIOL', 'AP_CALAB', 'AP_CALBC', 'AP_CHEM','AP_BIOL.flg', 'AP_CALAB.flg',
    'AP_CALBC.flg', 'AP_CHEM.flg', 'pct.female.head.flg', 'med.inc.flg'
  )
  new.vars <- intersect(vars.to.keep, names(course.df))
  return(course.df[, new.vars])
}
## Build a Logistic Regression Model for Propensity Score
## Fit Propensity Score model (linear terms only)
chem1b.step <- function(final.df) {
  # Forward stepwise (AIC, k = 2) propensity model for CHEM 1B.
  # History: stepwise selection first chose AP_BIOL.flg and AP_CHEM.flg;
  # AP_BIOL.flg was removed, then AP_CALAB.flg was selected; AP_CALAB.flg
  # and pct.female.head.flg were then removed -- hence the exclusions in
  # the scope formula below.  Returns the refitted glm.
  # FIX: previously ignored its final.df argument and read the global
  # chem1b.final; also dropped dead summary()/$anova calls.
  min.model <- glm(
    palN ~ chem1a.grd.pt.unt + cum.percent.units.passed + eth.erss + gender +
      AP_CHEM + AP_CHEM.flg, data = final.df, family = binomial
  )
  biggest <- formula(
    glm(palN ~ . - grd.pt.unt - AP_BIOL.flg - AP_CALAB.flg - pct.female.head.flg,
        data = final.df, family = binomial)
  )
  step.first.order <- step(
    min.model, direction = "forward", scope = biggest, trace = FALSE, k = 2
  )
  model.first.order <- formula(step.first.order)
  glm(model.first.order, data = final.df, family = binomial)
}
## CHEM 4 ====================================================================
## Filter to relevant variables
chem4.step.vars <- function(course.df) {
  # Keep only the covariates used for CHEM 4 propensity-score modeling
  # (plus palN and the grd.pt.unt outcome).  Columns absent from course.df
  # are silently skipped.
  # FIX: previously ignored its course.df argument and read the global
  # chem4.dat; now operates on the argument.
  vars.to.keep <- c(
    'acad.stand', 'adm.area', 'bot.level','cMaj', 'coh', 'course.age',
    'csus.gpa.start', 'cum.percent.units.passed', 'delay.from.hs', 'e.rmd',
    'eth.erss', 'father.ed','fys.flg','gender', 'hous.coh.term.flg', 'hs.gpa',
    'Instructor_01','median.income','m.rmd', 'mother.ed', 'pct.female.head',
    'pell.coh.term.flg', 'prevPAL', 'prevPASS', 'reason', 'sac.county.flg',
    'term.units.attemptedCensus', 'palN', 'grd.pt.unt','sat.math.score',
    'sat.math.flg', 'sat.verbal.score', 'sat.verbal.flg', 'AP_BIOL', 'AP_CALAB',
    'AP_CALBC', 'AP_CHEM','AP_BIOL.flg', 'AP_CALAB.flg', 'AP_CALBC.flg',
    'AP_CHEM.flg', 'pct.female.head.flg', 'med.inc.flg'
  )
  new.vars <- intersect(vars.to.keep, names(course.df))
  return(course.df[, new.vars])
}
## Build a Logistic Regression Model for Propensity Score
## Fit Propensity Score model (linear terms only)
chem4.step <- function(final.df) {
  # Forward stepwise (AIC, k = 2) propensity model for CHEM 4: starts from
  # a base model of prior-achievement covariates and may add any variable
  # except the outcome grd.pt.unt and AP_BIOL (excluded per earlier runs).
  # Returns the refitted glm for the selected formula.
  # FIX: previously ignored its final.df argument and read the global
  # chem4.final; also dropped dead summary()/$anova calls.
  min.model <- glm(
    palN ~ cum.percent.units.passed + eth.erss + gender + sat.math.score +
      sat.verbal.score + sat.math.flg + AP_CALAB + AP_CALAB.flg,
    data = final.df, family = binomial
  )
  biggest <- formula(
    glm(palN ~ . - grd.pt.unt - AP_BIOL, data = final.df, family = binomial)
  )
  step.first.order <- step(
    min.model, direction = "forward", scope = biggest, trace = FALSE, k = 2
  )
  model.first.order <- formula(step.first.order)
  glm(model.first.order, data = final.df, family = binomial)
}
## CHEM 24 ====================================================================
## Filter to relevant variables
chem24.step.vars <- function(course.df) {
  # Keep only the covariates used for CHEM 24 propensity-score modeling,
  # including the CHEM 1B prerequisite measures, plus palN and the
  # grd.pt.unt outcome.  Columns absent are silently skipped.
  # FIX: previously ignored its course.df argument and read the global
  # chem24.dat; now operates on the argument.
  vars.to.keep <- c(
    'acad.stand', 'adm.area', 'bot.level','cMaj', 'coh', 'course.age',
    'csus.gpa.start', 'cum.percent.units.passed', 'delay.from.hs', 'e.rmd',
    'eth.erss', 'father.ed', 'fys.flg','gender', 'hous.coh.term.flg', 'hs.gpa',
    'Instructor_01', 'median.income','m.rmd', 'mother.ed', 'pct.female.head',
    'pell.coh.term.flg', 'prevPAL', 'prevPASS', 'reason', 'sac.county.flg',
    'term.units.attemptedCensus', 'palN', 'grd.pt.unt', 'chem1b.grd.pt.unt',
    'chem1b.term.gpa', 'chem1b.units.attempted', 'AP_BIOL', 'AP_CALAB',
    'AP_CALBC', 'AP_CHEM','AP_BIOL.flg', 'AP_CALAB.flg', 'AP_CALBC.flg',
    'AP_CHEM.flg', 'pct.female.head.flg', 'med.inc.flg'
  )
  new.vars <- intersect(vars.to.keep, names(course.df))
  return(course.df[, new.vars])
}
## Build a Logistic Regression Model for Propensity Score
## Fit Propensity Score model (linear terms only)
chem24.step <- function(final.df) {
  # Forward stepwise (AIC, k = 2) propensity model for CHEM 24: starts from
  # a base model including the CHEM 1B prerequisite grade, and may add any
  # variable except the outcome grd.pt.unt and the excluded covariates
  # acad.stand, reason, and pct.female.head.flg.  Returns the refitted glm.
  # FIX: previously ignored its final.df argument and read the global
  # chem24.final; also dropped dead summary()/$anova calls.
  min.model <- glm(
    palN ~ chem1b.grd.pt.unt + cum.percent.units.passed + eth.erss + gender,
    data = final.df, family = binomial
  )
  biggest <- formula(
    glm(palN ~ . - grd.pt.unt - acad.stand - reason - pct.female.head.flg,
        data = final.df, family = binomial)
  )
  step.first.order <- step(
    min.model, direction = "forward", scope = biggest, trace = FALSE, k = 2
  )
  model.first.order <- formula(step.first.order)
  glm(model.first.order, data = final.df, family = binomial)
}
Make sure the PAL datafile is in the same directory as this RMarkdown file.
# Load the full PAL data set (through Spring 2019) and sanity-check its size
PALdatafull <- read_rds("paldatafull_csv.rds")
dim(PALdatafull)
## [1] 1099371 174
sum(PALdatafull$grd.pt.unt)
## [1] 2237555
The file, which includes data through the Spring 2019 semester, has 1099371 rows and 174 columns. The total of the grd.pt.unt column is 2237555.
# Restrict to the four chemistry courses of interest (base.time.course == 1
# rows) and order the course factor for downstream tables/plots
chem.classes <- paste("CHEM", c(4, '1A', '1B', 24))
chem.dat <- PALdatafull %>%
filter(base.time.course == 1, course %in% chem.classes) %>%
mutate(course = factor(course, levels = chem.classes))
dim(chem.dat) # 18948 174
## [1] 18948 174
num.stu <- dim(chem.dat)[1]
num.vars <- dim(chem.dat)[2]
There are 18948 rows and 174 variables. Each row is a chemistry student. So, there is a total of 18948 chemistry students.
There are 83 first-attempt-only Chem 24 Spring 2019 students. Some of them are incorrectly labeled as non-PAL and need to be relabeled.
# palN distribution for first-attempt Chem 24 Spring 2019 students BEFORE
# the relabeling fix: all 83 appear as non-PAL (0)
with(chem.dat %>%
filter(base.time.course == 1, pass.term.flg == "PASS Term", course == "CHEM 24", term == "Spring 2019", course.seq == 0),
table(palN))
## palN
## 0
## 83
# Relabel Chem 24 Spring 2019 palN from the PAL course file
chem.dat <- update.chem24s19(chem.dat)
# Same cross-tab AFTER the fix: 28 non-PAL, 9 incomplete PAL, 46 PAL
with(chem.dat %>%
filter(base.time.course == 1, pass.term.flg == "PASS Term", course == "CHEM 24", term == "Spring 2019", course.seq == 0),
table(palN))
## palN
## 0 1 2
## 28 9 46
# 0 1 2
# 28 9 46
After relabeling, there are 28 non-PAL students, 9 incomplete PAL students, and 46 PAL students for Chem 24 Spring 2019.
The course.seq variable indicates how many times a student has taken a course prior to the current attempt. To filter on the first attempt at a course, we set course.seq to 0.
Note: Excludes incomplete PAL students
# Raw (unadjusted) mean grade comparison for each chemistry course
get.raw.tab(chem.classes, chem.dat)
class | nonPALavg | PALavg | Diff | NonPAL_Num | PAL_Num | CompletePAL | TermPALStart |
---|---|---|---|---|---|---|---|
CHEM 4 | 2.03 | 2.39 | 0.36 | 1929 | 759 | 0.28 | 2123 |
CHEM 1A | 1.63 | 2.04 | 0.41 | 1717 | 1055 | 0.37 | 2128 |
CHEM 1B | 1.70 | 2.11 | 0.41 | 1090 | 769 | 0.40 | 2138 |
CHEM 24 | 1.63 | 2.06 | 0.43 | 224 | 177 | 0.43 | 2178 |
Create new variables.
delay.from.hs: delay since high school
cum.percent.units.passed: cumulative percent of units passed
cMaj: census majors without concentrations/specializations/tracks/etc.
county: which county did the student live in at the time of application to Sac state
sac.county.flg: did the student live in Sacramento county at the time of application to Sac State
Collapse sparse categories and other miscellaneous clean up of data. Sparse categories can cause complete separation in logistic regression and are only predictive for a few students.
# Check how many students did not complete PAL (palN == 1); these students
# are removed by clean.data below.
sum(chem.dat$palN==1) # 226
## [1] 226
incl.pal.stu <- sum(chem.dat$palN==1)
# clean.data drops incomplete-PAL students, collapses sparse categories,
# and adds the 5 derived variables described above (174 -> 179 columns).
chem.dat <- clean.data(chem.dat)
dim(chem.dat) # 18722 179
## [1] 18722 179
There were 226 chemistry students who did not complete PAL and were removed from the analysis. There are now 18722 chemistry students instead of 18948.
There were originally 174 variables in the data set, 5 variables were added, so there are now 179 total variables in the data set.
Based on data for 769 PAL students and 1090 non-PAL students, the unadjusted, raw difference in average grade for PAL and non-PAL students was 0.41 on a A=4.0 grade scale. However, since students self-select into supplemental PAL instruction, it is possible that the resulting PAL and non-PAL groups were not balanced with respect to other characteristics which could impact course grade. For example, if students with better study habits tend to enroll in PAL, all else being equal, the PAL mean grade would be higher than non-PAL– even if PAL had no effect on course grade. Consequently, we also performed a propensity score analysis to adjust the estimated effect of PAL on course grade for potential self-selection biases.
After adjusting for self-selection bias, we found that PAL students earned an average grade \(0.43\pm 0.08\) higher than non-PAL students. A sensitivity analysis indicates that this analysis is moderately sensitive to unknown confounders. Although the data give us sufficient evidence to conclude that PAL increases students’ grades in Chem 1B, the existence of an unknown confounder similar in magnitude to living in on-campus housing during their first year, ethnicity, or major would nullify that conclusion.
A propensity score analysis was conducted to assess the effect of PAL supplemental instruction on Chem 1B course grade. Propensity score adjustment was necessary since the data are observational and the characteristics of students who voluntarily enroll in PAL may differ in ways that may, independently of PAL, impact course grade compared to students who do not enroll in PAL. In propensity score analysis, variables related to both likelihood of PAL enrollment and course grade (confounders) are used in a logistic regression model to obtain a propensity score, which is a student’s likelihood of enrolling in PAL.
For Chem 1B, 13 covariates were found to have a statistically significant relationship to likelihood of enrolling in PAL. Variables related to increased likelihood of enrolling were: ethnicity, has an AP Chemistry exam score, enrollment in PAL in the past, class level, being remedial in math, being eligible for a Pell grant when entering CSUS, academic major, CSUS GPA at start of term, and from CSUS local admission area.
Using the propensity score model, all students in the dataset, PAL and non-PAL, are assigned a propensity score. Then, each PAL student is matched to one or more non-PAL students who have similar propensity score(s). After matching, the PAL and matched non-PAL groups are compared to determine if the distribution of each covariate is similar between the two groups. This is called a balance check. If the standardized difference between the non-PAL and PAL means is less than 0.10 then the strong criteria in (Leite 2017, p.10) is met for covariate balance. If the standardized difference is under 0.25, then a more lenient criteria is met. The highest absolute value standardized mean difference in this analysis is 0.0733. Consequently, adequate balance appears to have been achieved.
The difference in the average grade for the matched PAL and non-PAL data is then calculated. The estimated increase in the mean grade of students in PAL over those not in PAL after correcting for self-selection biases is \(0.43\pm 0.08\) or between 0.35 and 0.51 on a 4.0 grade scale. This result is statistically significant with a P-value of \(8.59\times 10^{-9}\) and is based on 573 PAL students and 457 non-PAL students. For comparison, the non-propensity score adjusted difference in average grade for PAL and non-PAL students was 0.41.
The estimated PAL effect is based on the assumption that the propensity model includes all potential confounders for PAL enrollment and grade in Chem 1B. However, it is possible that unknown confounders exist. A sensitivity analysis was conducted to determine how strong an unknown confounder must be to nullify the statistically significant PAL effect that was found in this analysis. The sensitivity analysis (Rosenbaum, 2002) indicated that an unknown confounder which increases the odds of being in PAL by more than 1.8 is enough to change the treatment effect from significant to non-significant. Inspection of the covariates in the estimated propensity model for Chem 1B indicates that if there is an unknown confounder that has an effect on the propensity score similar to the effect of being remedial in math, class level, or has an AP Chemistry exam score observed in this analysis, the PAL effect would become non-significant. Thus, this finding is sensitive to unknown confounders. It is possible a variable like the number of hours per week a student works (which is not in our dataset) is an unknown confounder which could reverse the statistical significance of this analysis.
Additionally, a number of variables were removed from this analysis due to large amounts of missingness. Since all students who had missing information on any included covariate were eliminated from the analysis, a balance had to be struck between retaining a sufficiently large pool of PAL and non-PAL students and retaining a sufficient number of important covariates. Variables which were eliminated from this analysis had substantial missing data or were subjectively judged as unlikely to be confounding. The choices about which variables to retain resulted in the original pool of 769 PAL students in Chem 1B being reduced to 573. Also, 457 non-PAL students were selected out of 1090 original non-PAL students.
When a PAL student had more than one suitable match among the non-PAL students, all non-PAL students were taken as matches and weighted appropriately in the final estimated PAL effect. There were 991 non-PAL matches. Of the 573 PAL students, 331 were matched one-to-one with non-PAL students and 242 were matched one-to-many with non-PAL students.
The non-PAL and PAL groups will include students with only first attempts at CHEM 1B. They will also include students with previous PAL participation and/or are currently in a PAL for another course.
For the prerequisite course CHEM 1A, the grade for the student’s last attempt and the number of times it was taken are added to the CHEM 1B data set. Only 1416 out of 1859 CHEM 1B students have CHEM 1A grades.
However, few students retook CHEM 1A so there was inadequate balance on number of times Chem 1B was taken, and it was removed from the propensity model after balance checks.
# Excludes course repeats: course.seq == 0 keeps first attempts only.
chem1b.dat <- chem.dat %>%
filter(course=="CHEM 1B", pass.term.flg == "PASS Term", course.seq== 0)
dim(chem1b.dat) # 1859 179
## [1] 1859 179
# Attach each student's last-attempt grade and attempt count for the
# prerequisite course (CHEM 1A); students without a CHEM 1A grade are
# dropped (1859 -> 1416 rows, 179 -> 182 columns).
prereq <- "CHEM 1A"
chem1b.dat <- get.prereq.grades(chem1b.dat, chem.dat, prereq)
# Rename CHEM 1A variables
chem1b.dat <- chem1b.dat %>%
rename(chem1a.course.seq= prereq.course.seq, chem1a.grd.pt.unt= prereq.grd.pt.unt, chem1a.grade = prereq.grade)
dim(chem1b.dat) # 1416 182
## [1] 1416 182
There are 1,416 CHEM 1B first attempt only students with prerequisite CHEM 1A grades.
The variables below were added to the CHEM 1B data, so there are now 182 variables instead of 179 variables.
chem1a.course.seq: Is the student taking CHEM 1A for the first time or is it the second attempt, third attempt, etc.
chem1a.grd.pt.unt: Numeric grade on 0 to 4 scale (0=F, 4=A) for CHEM 1A
chem1a.grade: Course grade for CHEM 1A
Collapse ‘cMaj’ variable separately for each course since the amount of collapsing necessary will vary by course.
# # Collapsed cMaj categories to Biology and Other majors at 0.04
# with(chem1b.dat, table(cMaj, palN))
# Lump census majors below 4% frequency into "OTHER" to avoid sparse
# categories (which can cause complete separation in logistic regression).
chem1b.dat <- group_category(data = chem1b.dat, feature = "cMaj", threshold = 0.04, update = TRUE)
with(chem1b.dat, table(cMaj, palN))
## palN
## cMaj 0 1
## Biology 433 338
## Chemistry 119 83
## Geology 7 5
## Health Science 7 4
## Kinesiology/Physical Education 129 107
## Nutrition 48 19
## OTHER 36 22
## Physics 33 2
## Undeclared 12 12
Remove variables having too many missing values in order to retain a larger pool of PAL and non-PAL students.
## [1] 35
## feature num_missing pct_missing
## 22 deg.plan3 1416 1.0000000
## 23 deg.plan4 1416 1.0000000
## 24 deg.plan5 1416 1.0000000
## 25 deg.plan6 1416 1.0000000
## 19 withdraw_reason 1414 0.9985876
## 21 deg.plan2 1393 0.9837571
## 4 pledge.term 1251 0.8834746
## 11 trf.gpaADM 1203 0.8495763
## 18 treat.section 824 0.5819209
## 1 fys.term.code 778 0.5494350
## 2 fys.grd 778 0.5494350
## 3 fys.rpt.flg 778 0.5494350
## 17 Instructor_02 723 0.5105932
## 27 grad.termERS 670 0.4731638
## 20 deg.plan1 656 0.4632768
## 26 grad.term 656 0.4632768
## 28 ttg 656 0.4632768
## 29 plan.college 607 0.4286723
## 30 plan.college.desc 607 0.4286723
## 31 plan.dept 607 0.4286723
## 32 plan.deptAbbr 607 0.4286723
## 33 plan.degree 607 0.4286723
## 34 plan.type 607 0.4286723
## 5 sat.math.score 277 0.1956215
## 6 sat.math.flg 277 0.1956215
## 7 sat.verbal.score 277 0.1956215
## 8 sat.verbal.flg 277 0.1956215
## 9 sat.test.date 277 0.1956215
## 13 ge.critical.thinking.status 260 0.1836158
## 14 ge.english.comp.status 260 0.1836158
## 15 ge.math.status 260 0.1836158
## 16 ge.oral.comm.status 260 0.1836158
## 12 admit.term 256 0.1807910
## 10 hs.gpa 176 0.1242938
## 35 county 166 0.1172316
## [1] 1416 147
35 variables missing >10%
So, 35 variables were removed due to missingness and there are now 147 variables instead of 182 variables.
# Keep only students with no missing values on any remaining variable
# (1416 -> 1337 rows; 79 students removed).
chem1b.dat <- chem1b.dat[complete.cases(chem1b.dat), ]
dim(chem1b.dat) # 1337 147
## [1] 1337 147
1337 out of 1416 students are kept
79 students were removed due to missingness of variables
# Identify constant columns: count distinct values per column, then keep
# the columns whose count equals 1.
# NOTE(review): inside the pipe, `.` in select_if(. == 1) is the one-row
# summary data frame, so `. == 1` yields a logical selector of constant
# columns — works here, but select_if is superseded; verify on dplyr upgrade.
single.vars <- chem1b.dat %>%
summarise(across(everything(), ~ n_distinct(.x))) %>%
select_if(. == 1)
# Table of variables with single values
CreateTableOne(vars = names(single.vars), data = chem1b.dat)
##
## Overall
## n 1337
## country = USA (%) 1337 (100.0)
## career.course = UGRD (%) 1337 (100.0)
## acad.prog.course = UGD (%) 1337 (100.0)
## course (%)
## CHEM 4 0 ( 0.0)
## CHEM 1A 0 ( 0.0)
## CHEM 1B 1337 (100.0)
## CHEM 24 0 ( 0.0)
## component = LEC (%) 1337 (100.0)
## units (mean (SD)) 5.00 (0.00)
## course.numeric (mean (SD)) 1.00 (0.00)
## div = Lower Division (%) 1337 (100.0)
## course.seq (mean (SD)) 0.00 (0.00)
## rpt.flg = First Attempt (%) 1337 (100.0)
## pass = Non-PASS (%) 1337 (100.0)
## c2s = Non-C2S (%) 1337 (100.0)
## base.time.course (mean (SD)) 1.00 (0.00)
## years (mean (SD)) 0.50 (0.00)
## withdraw_code = NWD (%) 1337 (100.0)
## enrl.flg = Enrolled (%) 1337 (100.0)
## enrl.flgERS = Enrolled (%) 1337 (100.0)
## rtn.flg = Retained (%) 1337 (100.0)
## rtn.flgERS = Retained (%) 1337 (100.0)
## pass.term.flg = PASS Term (%) 1337 (100.0)
## passN (mean (SD)) 0.00 (0.00)
## csus.gpa.start.flg = Not Missing (%) 1337 (100.0)
## higher.ed.gpa.start.flg = Not Missing (%) 1337 (100.0)
# Number of single-valued (constant) columns; they carry no information
# for the propensity model, so drop them below.
sum(single.vars) # 23
## [1] 23
# Remove single-valued variables.  Wrap the external character vector in
# all_of(): supplying a bare external vector to select() is deprecated in
# tidyselect and will eventually error.
chem1b.dat <- chem1b.dat %>%
  dplyr::select(-all_of(names(single.vars)))
## [1] 1337 124
124 out of 147 variables are kept
23 variables removed due to single values
# Remove non chem1b instructors
# NOTE(review): droplevels.data.frame's second argument is `except`
# (indices of columns to EXCLUDE from level-dropping).  Passing the factor
# chem1b.dat$Instructor_01 here is suspicious — confirm the intent was
# simply droplevels(chem1b.dat) to drop unused Instructor_01 levels.
chem1b.dat <- chem1b.dat %>%
droplevels(chem1b.dat$Instructor_01)
# Combine sparse ethnicity categories to Other
chem1b.dat <- chem1b.dat %>%
mutate(eth.erss = fct_other(eth.erss, drop = c("Native American", "Pacific Islander")))
with(chem1b.dat, table(eth.erss, palN))
## palN
## eth.erss 0 1
## African American 31 21
## Asian 233 208
## Foreign 8 6
## Hispanic 167 154
## Two or More Races 52 31
## Unknown 34 13
## White 227 137
## Other 12 3
Subjective judgment was used to narrow the pool of variables down to those likely to be confounders. It's important to include all variables correlated with outcome even if it is uncertain whether they are related to likelihood of enrolling in PAL. This allows for a more precise estimate of the treatment effect.
# Narrow the data to the candidate confounders for stepwise selection,
# then list the retained variable names.
chem1b.final <- chem1b.step.vars(chem1b.dat)
kable(names(chem1b.final))
x |
---|
acad.stand |
adm.area |
bot.level |
cMaj |
coh |
course.age |
csus.gpa.start |
cum.percent.units.passed |
delay.from.hs |
e.rmd |
eth.erss |
father.ed |
fys.flg |
gender |
hous.coh.term.flg |
Instructor_01 |
median.income |
m.rmd |
mother.ed |
pct.female.head |
pell.coh.term.flg |
prevPAL |
prevPASS |
reason |
sac.county.flg |
term.units.attemptedCensus |
palN |
grd.pt.unt |
chem1a.grd.pt.unt |
AP_BIOL |
AP_CALAB |
AP_CALBC |
AP_CHEM |
AP_BIOL.flg |
AP_CALAB.flg |
AP_CALBC.flg |
AP_CHEM.flg |
pct.female.head.flg |
med.inc.flg |
Subjectively identified four potential confounders to force the model to retain: cum.percent.units.passed, gender, eth.erss, and chem1a.grd.pt.unt. Stepwise variable selection will be used to select which of the variables currently in the PAL dataset to include in the propensity model.
# Fit the first-order propensity model: forward stepwise selection over the
# candidate covariates, forcing in the four subjectively chosen confounders
# named in the text above.
chem1b.final <- chem1b.step.vars(chem1b.dat)
chem1b.first.order.prop.model <- chem1b.step(chem1b.final)
summary(chem1b.first.order.prop.model)
##
## Call:
## glm(formula = model.first.order, family = binomial, data = chem1b.final)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -2.0704 -1.0105 -0.6102 1.0971 2.5795
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -2.4187425 1.1236815 -2.153 0.031357 *
## chem1a.grd.pt.unt -0.0952206 0.1183413 -0.805 0.421035
## cum.percent.units.passed 0.4253555 1.0331478 0.412 0.680553
## eth.erssAsian 0.2258778 0.3168833 0.713 0.475963
## eth.erssForeign 0.1193382 0.6437159 0.185 0.852923
## eth.erssHispanic 0.2176552 0.3213975 0.677 0.498269
## eth.erssTwo or More Races -0.0008854 0.3852899 -0.002 0.998166
## eth.erssUnknown -0.4839073 0.4644237 -1.042 0.297434
## eth.erssWhite -0.0732071 0.3225052 -0.227 0.820427
## eth.erssOther -1.5590171 0.7498221 -2.079 0.037601 *
## genderMale 0.1921808 0.1281980 1.499 0.133849
## AP_CHEM 0.0172767 0.3201902 0.054 0.956969
## AP_CHEM.flgNot Missing -0.8156336 0.2959788 -2.756 0.005856 **
## prevPAL 0.5954449 0.0787251 7.564 3.92e-14 ***
## bot.levelJunior -0.5557096 0.3064733 -1.813 0.069795 .
## bot.levelSenior -0.7147519 0.3160084 -2.262 0.023709 *
## bot.levelSophomore -0.0971246 0.3014493 -0.322 0.747307
## m.rmdRemedial in Math 0.6764007 0.1892499 3.574 0.000351 ***
## pell.coh.term.flgPell 0.4699412 0.1222645 3.844 0.000121 ***
## cMajChemistry -0.0751801 0.1856516 -0.405 0.685512
## cMajGeology 0.2741545 0.6425678 0.427 0.669631
## cMajHealth Science -0.1534683 0.6711026 -0.229 0.819117
## cMajKinesiology/Physical Education 0.2702222 0.1694312 1.595 0.110739
## cMajNutrition -0.3721911 0.3149924 -1.182 0.237369
## cMajOTHER 0.0981896 0.3051662 0.322 0.747636
## cMajPhysics -2.2262195 0.7492173 -2.971 0.002965 **
## cMajUndeclared 0.2144541 0.4430973 0.484 0.628394
## csus.gpa.start 0.4743598 0.2143101 2.213 0.026868 *
## adm.areanonlocal -0.2446215 0.1375927 -1.778 0.075426 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 1826.1 on 1336 degrees of freedom
## Residual deviance: 1641.8 on 1308 degrees of freedom
## AIC: 1699.8
##
## Number of Fisher Scoring iterations: 5
# Estimated propensity scores: fitted probabilities of PAL enrollment from
# the logistic propensity model.
p.score <- chem1b.first.order.prop.model$fitted.values
# Names of the covariates retained in the propensity model (everything in
# the model frame except the treatment indicator palN).
chem1b.covs <- names(chem1b.first.order.prop.model %>% pluck("model") %>% dplyr::select(-palN))
# Unadjusted mean grades
get.unadj.means(chem1b.final)
Non-PAL | PAL | Diff. |
---|---|---|
1.859686 | 2.187609 | 0.3279232 |
Standardized mean differences for continuous variables and categorical variables.
# Pre-matching covariate summary stratified by PAL status, reporting
# standardized mean differences and suppressing hypothesis tests.
unmatched.tab <- CreateTableOne(
  data = chem1b.final,
  vars = chem1b.covs,
  strata = "palN",
  test = FALSE,
  smd = TRUE
)
print(unmatched.tab, smd = TRUE)
## Stratified by palN
## 0 1 SMD
## n 764 573
## chem1a.grd.pt.unt (mean (SD)) 2.59 (0.68) 2.58 (0.64) 0.006
## cum.percent.units.passed (mean (SD)) 0.92 (0.08) 0.92 (0.08) 0.008
## eth.erss (%) 0.249
## African American 31 ( 4.1) 21 ( 3.7)
## Asian 233 (30.5) 208 (36.3)
## Foreign 8 ( 1.0) 6 ( 1.0)
## Hispanic 167 (21.9) 154 (26.9)
## Two or More Races 52 ( 6.8) 31 ( 5.4)
## Unknown 34 ( 4.5) 13 ( 2.3)
## White 227 (29.7) 137 (23.9)
## Other 12 ( 1.6) 3 ( 0.5)
## gender = Male (%) 308 (40.3) 223 (38.9) 0.029
## AP_CHEM (mean (SD)) 1.97 (0.26) 1.96 (0.18) 0.037
## AP_CHEM.flg = Not Missing (%) 55 ( 7.2) 19 ( 3.3) 0.175
## prevPAL (mean (SD)) 0.63 (0.77) 1.02 (0.80) 0.499
## bot.level (%) 0.210
## Freshman 32 ( 4.2) 29 ( 5.1)
## Junior 270 (35.3) 211 (36.8)
## Senior 256 (33.5) 141 (24.6)
## Sophomore 206 (27.0) 192 (33.5)
## m.rmd = Remedial in Math (%) 75 ( 9.8) 106 (18.5) 0.251
## pell.coh.term.flg = Pell (%) 358 (46.9) 343 (59.9) 0.263
## cMaj (%) 0.299
## Biology 399 (52.2) 324 (56.5)
## Chemistry 114 (14.9) 79 (13.8)
## Geology 6 ( 0.8) 5 ( 0.9)
## Health Science 7 ( 0.9) 4 ( 0.7)
## Kinesiology/Physical Education 120 (15.7) 107 (18.7)
## Nutrition 44 ( 5.8) 18 ( 3.1)
## OTHER 32 ( 4.2) 22 ( 3.8)
## Physics 30 ( 3.9) 2 ( 0.3)
## Undeclared 12 ( 1.6) 12 ( 2.1)
## csus.gpa.start (mean (SD)) 3.16 (0.45) 3.21 (0.41) 0.105
## adm.area = nonlocal (%) 231 (30.2) 149 (26.0) 0.094
Check how many variables have SMD > 0.1
# Tally covariates with standardized mean difference above the 0.1
# balance threshold (8 of 13 before matching).
addmargins(table(ExtractSmd(unmatched.tab) > 0.1))
##
## FALSE TRUE Sum
## 5 8 13
get.imbal.vars(unmatched.tab)
Variable | Before Matching SMD |
---|---|
prevPAL | 0.4992792 |
cMaj | 0.2985656 |
pell.coh.term.flg | 0.2628653 |
m.rmd | 0.2510048 |
eth.erss | 0.2493008 |
bot.level | 0.2101597 |
AP_CHEM.flg | 0.1746490 |
csus.gpa.start | 0.1050944 |
8 variables have SMD >0.1
Implement a propensity score matching method.
# Propensity-score matching (Matching::Match), estimating the average
# treatment effect on the treated (ATT):
#   Y  = course grade (outcome), Tr = PAL indicator, X = propensity score.
# One-to-many matching with replacement, keeping ties, within a caliper of
# 0.25 SDs of the propensity score.  The original with() wrapper was
# redundant — every argument is fully qualified or global — and F has been
# spelled out as FALSE (F is a reassignable binding, not a reserved word).
match.chem1b <- Match(
  Y = chem1b.final$grd.pt.unt, Tr = chem1b.final$palN, X = p.score,
  BiasAdjust = FALSE, estimand = "ATT", M = 1, caliper = 0.25,
  replace = TRUE, ties = TRUE
)
Standardized mean differences for continuous variables and categorical variables.
# Needed for match table: give each student a stable row id before
# expanding to the matched sample.
chem1b.final <- chem1b.final %>%
rownames_to_column(var = "id")
# Matched data: stack the treated rows and their matched controls, in the
# order returned by Match().
chem1b.matched.dat <- chem1b.final[unlist(match.chem1b[c("index.treated", "index.control")]), ]
# Match weights apply identically to each treated row and its control.
chem1b.matched.dat$match.weights<- c(match.chem1b$weights, match.chem1b$weights)
# Add match weights to match data via a survey design object so the
# balance table below is weight-aware.
weighted.dat<-svydesign(id=~1,weights=~match.weights, data = chem1b.matched.dat)
# Variable Summary Table for matched data with match weights
matched.tab <-svyCreateTableOne(vars = chem1b.covs, strata = "palN", data= weighted.dat, smd = TRUE, test = FALSE)
print(matched.tab, smd = TRUE)
## Stratified by palN
## 0 1 SMD
## n 573.00 573.00
## chem1a.grd.pt.unt (mean (SD)) 2.54 (0.67) 2.58 (0.64) 0.071
## cum.percent.units.passed (mean (SD)) 0.92 (0.08) 0.92 (0.08) 0.002
## eth.erss (%) 0.176
## African American 33.9 ( 5.9) 21.0 ( 3.7)
## Asian 189.9 (33.1) 208.0 (36.3)
## Foreign 7.5 ( 1.3) 6.0 ( 1.0)
## Hispanic 153.9 (26.9) 154.0 (26.9)
## Two or More Races 48.8 ( 8.5) 31.0 ( 5.4)
## Unknown 11.0 ( 1.9) 13.0 ( 2.3)
## White 125.7 (21.9) 137.0 (23.9)
## Other 2.2 ( 0.4) 3.0 ( 0.5)
## gender = Male (%) 195.8 (34.2) 223.0 (38.9) 0.099
## AP_CHEM (mean (SD)) 1.96 (0.11) 1.96 (0.18) 0.003
## AP_CHEM.flg = Not Missing (%) 10.9 ( 1.9) 19.0 ( 3.3) 0.089
## prevPAL (mean (SD)) 1.04 (0.92) 1.02 (0.80) 0.025
## bot.level (%) 0.078
## Freshman 37.9 ( 6.6) 29.0 ( 5.1)
## Junior 218.6 (38.2) 211.0 (36.8)
## Senior 135.4 (23.6) 141.0 (24.6)
## Sophomore 181.2 (31.6) 192.0 (33.5)
## m.rmd = Remedial in Math (%) 106.2 (18.5) 106.0 (18.5) 0.001
## pell.coh.term.flg = Pell (%) 325.4 (56.8) 343.0 (59.9) 0.062
## cMaj (%) 0.118
## Biology 298.0 (52.0) 324.0 (56.5)
## Chemistry 85.5 (14.9) 79.0 (13.8)
## Geology 3.6 ( 0.6) 5.0 ( 0.9)
## Health Science 6.8 ( 1.2) 4.0 ( 0.7)
## Kinesiology/Physical Education 119.0 (20.8) 107.0 (18.7)
## Nutrition 19.1 ( 3.3) 18.0 ( 3.1)
## OTHER 28.7 ( 5.0) 22.0 ( 3.8)
## Physics 2.0 ( 0.3) 2.0 ( 0.3)
## Undeclared 10.2 ( 1.8) 12.0 ( 2.1)
## csus.gpa.start (mean (SD)) 3.18 (0.42) 3.21 (0.41) 0.055
## adm.area = nonlocal (%) 147.6 (25.8) 149.0 (26.0) 0.005
Continuous variables: Standardized mean differences are computed by using the standard deviation of treated group
Binary variables: Raw differences in proportion
All variables are balanced and under the <0.1 mean threshold.
## Balance Measures
## All covariates are balanced.
##
## Balance tally for mean differences
## count
## Balanced, <0.1 32
## Not Balanced, >0.1 0
##
## Variable with the greatest mean difference
## Variable Diff.Adj M.Threshold
## chem1a.grd.pt.unt 0.0733 Balanced, <0.1
##
## Sample sizes
## Control Treated
## All 764. 573
## Matched (ESS) 214.85 573
## Matched (Unweighted) 457. 573
## Unmatched 307. 0
# Percent improvement in balance per covariate (unmatched vs matched SMD).
# chem1b.bal appears to be a cobalt balance object built in an unshown
# chunk — TODO confirm where it is created.
get.var.perc.tab(chem1b.bal)
## Variable Diff.Un Diff.Adj % Improvement
## 1 p.score 0.7985494927 0.0016913096 100
## 2 eth.erss_Hispanic 0.0501745201 0.0002326934 100
## 3 m.rmd_Remedial in Math 0.0868237347 -0.0002908668 100
## 4 cMaj_Physics -0.0357766143 0.0000000000 100
## 5 prevPAL 0.4896307024 -0.0266280848 95
## 6 AP_CHEM -0.0460365334 0.0026776474 94
## 7 adm.area_nonlocal -0.0423211169 0.0023851076 94
## 8 cMaj_Nutrition -0.0261780105 -0.0020069808 92
## 9 bot.level_Senior -0.0890052356 0.0098312973 89
## 10 eth.erss_Other -0.0104712042 0.0013089005 88
## 11 eth.erss_Unknown -0.0218150087 0.0034322280 84
## 12 cum.percent.units.passed -0.0084163831 0.0017311130 79
## 13 pell.coh.term.flg_Pell 0.1300174520 0.0307737056 76
## 14 bot.level_Sophomore 0.0654450262 0.0189063409 71
## 15 eth.erss_White -0.0580279232 0.0197498546 66
## 16 AP_CHEM.flg_Not Missing -0.0388307155 0.0141942990 63
## 17 csus.gpa.start 0.1110693721 0.0559114629 50
## 18 eth.erss_Asian 0.0580279232 0.0315299593 46
## 19 cMaj_Undeclared 0.0052356021 0.0031413613 40
## 20 cMaj_Kinesiology/Physical Education 0.0296684119 -0.0209714951 29
## 21 bot.level_Junior 0.0148342059 -0.0132635253 11
## 22 eth.erss_Foreign 0.0000000000 -0.0026178010 0
## 23 cMaj_Chemistry -0.0113438045 -0.0113728912 0
## 24 cMaj_Biology 0.0431937173 0.0454043048 -5
## 25 bot.level_Freshman 0.0087260035 -0.0154741129 -77
## 26 cMaj_Health Science -0.0021815009 -0.0047993019 -120
## 27 eth.erss_Two or More Races -0.0139616056 -0.0310936591 -123
## 28 cMaj_Geology 0.0008726003 0.0023560209 -170
## 29 cMaj_OTHER -0.0034904014 -0.0117510180 -237
## 30 gender_Male -0.0139616056 0.0474694590 -240
## 31 eth.erss_African American -0.0039267016 -0.0225421757 -474
## 32 chem1a.grd.pt.unt -0.0065835834 0.0732743689 -1013
# Side-by-side covariate balance plot, before vs after matching.
get.bal.plot(unmatched.tab, matched.tab)
# Love plot: raw proportion differences for binary variables, stars mark
# standardized differences, 0.1 balance threshold.  FALSE is spelled out
# (F is a reassignable binding, not a reserved word).
love.plot(chem1b.bal, binary = "raw", stars = "std", var.order = "unadjusted",
          thresholds = c(m = .1), abs = FALSE)
# Counts of single vs multiple matches for PAL and non-PAL students.
create.match.tab(chem1b.matched.dat)
Non-PAL | PAL | |
---|---|---|
Single Matches | 168 | 331 |
Multiple Matches | 289 | 242 |
Total Students | 457 | 573 |
Out of 573 PAL students, 331 PAL students were matched to one non-PAL student and 242 PAL students were matched to multiple non-PAL students.
Out of 991 non-PAL student matches, there were 457 non-PAL students, 168 of the non-PAL students were matched to one PAL student and 289 of the non-PAL students were matched to multiple PAL students.
# Plot of the estimated ATT for the matched sample.
get.att.plot(chem1b.final, match.chem1b)
The standardized mean difference of the prognostic scores is 0.0751, which indicates balance. All variables are under the 0.1 mean difference threshold. It is likely that the effect estimate will be relatively unbiased, since the estimated prognostic score is balanced.
##
## Call:
## glm(formula = f.build("grd.pt.unt", chem1b.covs), data = ctrl.data)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -2.38485 -0.44720 0.07171 0.50204 2.90517
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -2.560100 0.506831 -5.051 5.54e-07 ***
## chem1a.grd.pt.unt 0.383550 0.056839 6.748 3.04e-11 ***
## cum.percent.units.passed 0.263816 0.493095 0.535 0.5928
## eth.erssAsian 0.173864 0.153593 1.132 0.2580
## eth.erssForeign 0.230472 0.316260 0.729 0.4664
## eth.erssHispanic 0.120339 0.154148 0.781 0.4352
## eth.erssTwo or More Races 0.224773 0.180187 1.247 0.2126
## eth.erssUnknown 0.213163 0.199194 1.070 0.2849
## eth.erssWhite 0.209831 0.154464 1.358 0.1747
## eth.erssOther 0.663180 0.269936 2.457 0.0142 *
## genderMale 0.137315 0.062484 2.198 0.0283 *
## AP_CHEM 0.112366 0.113839 0.987 0.3239
## AP_CHEM.flgNot Missing 0.062124 0.113912 0.545 0.5857
## prevPAL -0.161055 0.039232 -4.105 4.49e-05 ***
## bot.levelJunior 0.054140 0.158553 0.341 0.7329
## bot.levelSenior 0.032432 0.160796 0.202 0.8402
## bot.levelSophomore -0.021684 0.155768 -0.139 0.8893
## m.rmdRemedial in Math -0.077182 0.104382 -0.739 0.4599
## pell.coh.term.flgPell -0.079351 0.059006 -1.345 0.1791
## cMajChemistry 0.148660 0.090582 1.641 0.1012
## cMajGeology 0.131348 0.324729 0.404 0.6860
## cMajHealth Science 0.535358 0.304482 1.758 0.0791 .
## cMajKinesiology/Physical Education 0.091306 0.083899 1.088 0.2768
## cMajNutrition -0.289441 0.129282 -2.239 0.0255 *
## cMajOTHER -0.084585 0.146957 -0.576 0.5651
## cMajPhysics -0.009565 0.155174 -0.062 0.9509
## cMajUndeclared 0.241947 0.231955 1.043 0.2973
## csus.gpa.start 0.895163 0.101919 8.783 < 2e-16 ***
## adm.areanonlocal -0.036492 0.064317 -0.567 0.5706
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for gaussian family taken to be 0.6128765)
##
## Null deviance: 802.88 on 763 degrees of freedom
## Residual deviance: 450.46 on 735 degrees of freedom
## AIC: 1824.5
##
## Number of Fisher Scoring iterations: 2
## Balance Measures
## Type Diff.Adj M.Threshold
## prog.score Distance 0.0751 Balanced, <0.1
## chem1a.grd.pt.unt Contin. 0.0733 Balanced, <0.1
## cum.percent.units.passed Contin. 0.0017 Balanced, <0.1
## eth.erss_African American Binary -0.0225 Balanced, <0.1
## eth.erss_Asian Binary 0.0315 Balanced, <0.1
## eth.erss_Foreign Binary -0.0026 Balanced, <0.1
## eth.erss_Hispanic Binary 0.0002 Balanced, <0.1
## eth.erss_Two or More Races Binary -0.0311 Balanced, <0.1
## eth.erss_Unknown Binary 0.0034 Balanced, <0.1
## eth.erss_White Binary 0.0197 Balanced, <0.1
## eth.erss_Other Binary 0.0013 Balanced, <0.1
## gender_Male Binary 0.0475 Balanced, <0.1
## AP_CHEM Contin. 0.0027 Balanced, <0.1
## AP_CHEM.flg_Not Missing Binary 0.0142 Balanced, <0.1
## prevPAL Contin. -0.0266 Balanced, <0.1
## bot.level_Freshman Binary -0.0155 Balanced, <0.1
## bot.level_Junior Binary -0.0133 Balanced, <0.1
## bot.level_Senior Binary 0.0098 Balanced, <0.1
## bot.level_Sophomore Binary 0.0189 Balanced, <0.1
## m.rmd_Remedial in Math Binary -0.0003 Balanced, <0.1
## pell.coh.term.flg_Pell Binary 0.0308 Balanced, <0.1
## cMaj_Biology Binary 0.0454 Balanced, <0.1
## cMaj_Chemistry Binary -0.0114 Balanced, <0.1
## cMaj_Geology Binary 0.0024 Balanced, <0.1
## cMaj_Health Science Binary -0.0048 Balanced, <0.1
## cMaj_Kinesiology/Physical Education Binary -0.0210 Balanced, <0.1
## cMaj_Nutrition Binary -0.0020 Balanced, <0.1
## cMaj_OTHER Binary -0.0118 Balanced, <0.1
## cMaj_Physics Binary 0.0000 Balanced, <0.1
## cMaj_Undeclared Binary 0.0031 Balanced, <0.1
## csus.gpa.start Contin. 0.0559 Balanced, <0.1
## adm.area_nonlocal Binary 0.0024 Balanced, <0.1
## p.score Contin. 0.0017 Balanced, <0.1
##
## Balance tally for mean differences
## count
## Balanced, <0.1 33
## Not Balanced, >0.1 0
##
## Variable with the greatest mean difference
## Variable Diff.Adj M.Threshold
## chem1a.grd.pt.unt 0.0733 Balanced, <0.1
##
## Sample sizes
## Control Treated
## All 764. 573
## Matched (ESS) 214.85 573
## Matched (Unweighted) 457. 573
## Unmatched 307. 0
The estimated increase in the mean grade of students in PAL over those not in PAL after correcting for self-selection biases is 0.43171. This result is statistically significant with a P-value of \(1.7177\times 10^{-8}\) and is based on 573 PAL students and 991 non-PAL student matches (457 total non-PAL students). Note this P-value is for a two-tailed test, but it will be corrected to a one-tailed test (halves the P-value) in the final table output summarizing the effect of PAL across chemistry courses.
# Matching estimate: ATT, Abadie-Imbens SE, t-statistic, and two-tailed
# p-value, plus sample-size bookkeeping.
summary(match.chem1b)
##
## Estimate... 0.43171
## AI SE...... 0.076567
## T-stat..... 5.6383
## p.val...... 1.7177e-08
##
## Original number of observations.............. 1337
## Original number of treated obs............... 573
## Matched number of observations............... 573
## Matched number of observations (unweighted). 991
##
## Caliper (SDs)........................................ 0.25
## Number of obs dropped by 'exact' or 'caliper' 0
# Rosenbaum sensitivity analysis: p-value bounds as the odds of differential
# treatment assignment (Gamma) ranges from 1.0 to 2.0 in steps of 0.1.
psens(match.chem1b, Gamma=2.0, GammaInc = 0.1)
##
## Rosenbaum Sensitivity Test for Wilcoxon Signed Rank P-Value
##
## Unconfounded estimate .... 0
##
## Gamma Lower bound Upper bound
## 1.0 0 0.0000
## 1.1 0 0.0000
## 1.2 0 0.0000
## 1.3 0 0.0000
## 1.4 0 0.0000
## 1.5 0 0.0005
## 1.6 0 0.0058
## 1.7 0 0.0363
## 1.8 0 0.1335
## 1.9 0 0.3217
## 2.0 0 0.5599
##
## Note: Gamma is Odds of Differential Assignment To
## Treatment Due to Unobserved Factors
##
Note that in the above table \(\Gamma=1.8\) in the first column is the first row where 0.05 is between the Lower and Upper bounds. This means that an unknown confounder which increases the odds of being in PAL by more than 1.8 is enough to change the treatment effect from significant to non-significant. The next code block generates the effect on the odds ratio of each variable in the propensity score. Thus, if there is an unknown confounder that has an effect on the propensity score similar to "instructor", "bot.level", or "eth.erss", the PAL effect would become non-significant. Thus, this finding is sensitive to unknown confounders. It is possible a variable like the number of hours per week a student works (which is not in our dataset) is a confounder which could reverse the statistical significance of this analysis.
# Magnitude of each propensity-model coefficient on the odds scale:
# exp(|coef|) is the multiplicative effect on the odds of PAL enrollment,
# sorted ascending for comparison with the Gamma = 1.8 sensitivity bound.
kable(sort(exp(abs(chem1b.first.order.prop.model$coefficients))))
x | |
---|---|
eth.erssTwo or More Races | 1.000886 |
AP_CHEM | 1.017427 |
eth.erssWhite | 1.075953 |
cMajChemistry | 1.078078 |
chem1a.grd.pt.unt | 1.099901 |
bot.levelSophomore | 1.101998 |
cMajOTHER | 1.103172 |
eth.erssForeign | 1.126751 |
cMajHealth Science | 1.165871 |
genderMale | 1.211890 |
cMajUndeclared | 1.239185 |
eth.erssHispanic | 1.243158 |
eth.erssAsian | 1.253423 |
adm.areanonlocal | 1.277138 |
cMajKinesiology/Physical Education | 1.310256 |
cMajGeology | 1.315418 |
cMajNutrition | 1.450910 |
cum.percent.units.passed | 1.530134 |
pell.coh.term.flgPell | 1.599900 |
csus.gpa.start | 1.606985 |
eth.erssUnknown | 1.622401 |
bot.levelJunior | 1.743177 |
prevPAL | 1.813838 |
m.rmdRemedial in Math | 1.966786 |
bot.levelSenior | 2.043680 |
AP_CHEM.flgNot Missing | 2.260607 |
eth.erssOther | 4.754146 |
cMajPhysics | 9.264774 |
(Intercept) | 11.231726 |
Course | Non-PAL | PAL | Diff. | Std. error | p-val | Sensitivity | N(non-PAL) | N(PAL) |
---|---|---|---|---|---|---|---|---|
CHEM 1B | 1.76 | 2.19 | 0.43 | 0.08 | 8.59e-09 | 1.8 | 457 | 573 |
Greifer, Noah. 2020. Cobalt: Covariate Balance Tables and Plots. https://CRAN.R-project.org/package=cobalt.
Leite, W. L. 2017. Practical Propensity Score Methods Using R. Thousand Oaks, CA: Sage Publishing. https://osf.io/nygb5/.
Sekhon, Jasjeet S. 2011. “Multivariate and Propensity Score Matching Software with Automated Balance Optimization: The Matching Package for R.” Journal of Statistical Software 42 (7): 1–52. http://www.jstatsoft.org/v42/i07/.
Yoshida, Kazuki, and Alexander Bartel. 2020. Tableone: Create ’Table 1’ to Describe Baseline Characteristics with or Without Propensity Score Weights. https://CRAN.R-project.org/package=tableone.
Zhang, Z., H. J. Kim, G. Lonjon, Y. Zhu, and written on behalf of AME Big-Data Clinical Trial Collaborative Group. 2019. “Balance Diagnostics After Propensity Score Matching.” Annals of Translational Medicine 7 (1): 16. https://doi.org/10.21037/atm.2018.12.10.