Load packages

First, add some R packages to the workspace.
Caution: warning messages are suppressed to reduce clutter in the output.

tidyverse: Importing data, cleaning data, data manipulation, & data visualization
kableExtra: Build HTML tables
DataExplorer: Exploratory Data Analysis & Feature Engineering
tableone: Standardized mean differences for before and after matching
survey: Matched data with match weights
Matching: Propensity score matching
cobalt: Covariate balance
reshape2: Covariate balance plot
rbounds: Rosenbaum Sensitivity test

library(tidyverse)
library(kableExtra)
library(DataExplorer) 
library(tableone)
library(survey)
library(Matching)
library(cobalt)
library(reshape2)
library(rbounds)

select <- dplyr::select # Resolves package conflicts with select
options(width = 120) # Format print width
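
The warning suppression mentioned above is typically handled in the R Markdown setup chunk, which is not shown here; a minimal sketch, assuming knitr chunk options are used:

knitr::opts_chunk$set(warning = FALSE, message = FALSE) # hide warnings and package startup messages in all chunks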

Load functions

General functions used throughout the analysis.

# Update palN for Chem 24 Spring 2019 ------------------------------------------
update.chem24s19 <- function(chem.dat) {
  PAL.course.data <- read_rds("palCourseData.rds")
  chem24.S19 <- PAL.course.data  %>%
    filter(term == "Spring 2019", course == "CHEM 24")
  # Add a palN indicator for Chem 24 Spring 2019
  chem24.S19 <- chem24.S19 %>%
    mutate(palN.chem24.S19 = case_when(
      pal.grade == "CR" ~ 2,
      is.na(pal.grade) ~ 0,
      TRUE ~ 1
    )) %>%
    select(emplid, palN.chem24.S19) 
  
  # Check how many students are non-PAL, incomplete PAL, and PAL
  table(chem24.S19$palN.chem24.S19)
  # 0  1  2 
  # 51 10 52 
  
  chem.dat <- left_join(chem.dat, chem24.S19, by= "emplid" )
  
  chem.dat  <- chem.dat %>%
    mutate(palN = case_when(
      course == "CHEM 24" & term == "Spring 2019" ~ palN.chem24.S19,
      TRUE ~ palN
    )) %>%
    select(-palN.chem24.S19)
  
  return(chem.dat)
}

# Get raw table of mean gpa for PAL and non-PAL  -------------------------------
get.raw.tab <- function(classes, df)
{ 
 raw.table = data.frame(class=character(),
                         nonPALavg=numeric(),
                         PALavg=numeric(), 
                         Diff=numeric(), 
                         NonPAL_Num= integer(),
                         PAL_Num=integer(),
                         CompletePAL=numeric(),
                         TermPALStart=integer(),
                         row.names=NULL,
                         stringsAsFactors = FALSE)
  
  
  for (i in 1:length(classes))
  {
    curr.class = classes[i]
    temp = subset(df, course==curr.class & course.seq==0)
    pal.start=min(unique(temp$term.code[temp$palN==2]))
    # only include terms after PAL start term
    temp = subset(temp, term.code>= pal.start)
    
    x=tapply(temp$grd.pt.unt,temp$palN, 
             mean, na.rm=T) %>% 
      as.numeric %>% 
      round(2)
    
    y=table(temp$palN) %>% as.numeric
    
    raw.table[i, 'class' ] = curr.class
    raw.table[i, c(2:4,7)]=c(x[1], x[3],x[3]-x[1],
                             round(y[3]/sum(y),2))
    raw.table[i, c(5,6,8)]= c(y[1], y[3], pal.start)
    
  }
  
  # formatted table
  raw.table <- kable(raw.table, caption = "Raw Comparison of PAL and non-PAL Grades (No Propensity Adjustment)") %>%
    kable_styling(full_width= T, position = "left")
 
   return(raw.table)
}

# Data cleaning ----------------------------------------------------------------
clean.data <- function(df)
{
  # Replaced coh.term with coh.term.course
  yr.course.taken = as.numeric(gsub(".*([0-9]{4})","\\1",df$coh.term.course))
  df$delay.from.hs = ifelse(!is.na(yr.course.taken) & !is.na(df$hs.grad.yr),
                                  yr.course.taken-df$hs.grad.yr, NA)
  
  sum(is.na(df$delay.from.hs)) 
  
  # remove students who did not complete PAL 
  df=subset(df, palN!=1) 
  
  #recode palN to factor with 0/1 levels
  df$palN = ifelse(df$palN==2, 1, 0)
  
  #clean up category names in m.rmd and e.rmd
  df$m.rmd[df$m.rmd=="Not Remedial\nin Math"]="Not Remedial in Math"
  df$m.rmd[df$m.rmd=="Remedial\nin Math"]="Remedial in Math"
  df$e.rmd[df$e.rmd=="Not Remedial\nin English"]="Not Remedial in English"
  df$e.rmd[df$e.rmd=="Remedial\nin English"]="Remedial in English"
  
  df <- df %>% mutate(m.rmd = factor(m.rmd), e.rmd = factor(e.rmd))
  # table(df$e.rmd)
  
  # Create feature, proportion of cumulative units taken that were passes
  # To distinguish the students who have taken 0 units from the students who 
  #   have passed 0  units they have taken, students who have taken 0 units are 
  #   labeled as -1. Then the -1 is replaced by the mean of cum.percent.units.passed
  df <- df %>%
    mutate(cum.percent.units.passed = ifelse(tot.taken.prgrss.start == 0, -1,
                                             tot.passd.prgrss.start / tot.taken.prgrss.start)) %>%
    mutate(cum.percent.units.passed = ifelse(cum.percent.units.passed  == -1, mean(cum.percent.units.passed,  na.rm =TRUE),
                                             cum.percent.units.passed  ))
  
  # code instructor as alphabetic letter for anonymity
  df$Instructor_01=droplevels(factor(df$Instructor_01))
  
  instructor.vec = sort(unique(df$Instructor_01))
  num.instr = length(instructor.vec)
  
  df$Instructor_01 = factor(
    df$Instructor_01, levels = instructor.vec, labels=as.character(1:num.instr)
  )
  
  key.instr.code = cbind(as.character(instructor.vec), 1:num.instr)

  # Add "cMaj", census majors without concentrations/specializations/tracks/etc. 
  major_lookup <- read.csv("Census Major Lookup.csv", header = TRUE, 
                           stringsAsFactors = FALSE)
  df <- merge(df, major_lookup %>% select(censusMajor, cMaj),
              by = "censusMajor", all.x = TRUE)

  # Recode mother's education and father's education variables.
  non.hs.grad= c("No High School","Some High School")
  hs.grad= c("High School Graduate","Some College","2-Year College Graduate")
  coll.grad= c("4-Year College Graduate","Postgraduate")
  parent.ed.levels= c(
    "Non-HS Graduate","HS Graduate", "College Graduate", "Unknown"
  )
  
  df <- df %>%
    mutate(
      mother.ed = ifelse(mother.ed %in% non.hs.grad, "Non-HS Graduate",
        ifelse(mother.ed %in% hs.grad, "HS Graduate", 
          ifelse(mother.ed %in% coll.grad, "College Graduate", "Unknown"))),
      mother.ed= factor(mother.ed, levels= parent.ed.levels),
      father.ed = ifelse(father.ed %in% non.hs.grad,"Non-HS Graduate",
        ifelse(father.ed %in% hs.grad, "HS Graduate", 
          ifelse(father.ed %in% coll.grad, "College Graduate", "Unknown"))),
      father.ed= factor(father.ed, levels= parent.ed.levels))
  
  # Recoded adm.area with these counties as local: 'El Dorado', 'Nevada', 
  #   'Placer', 'Sacramento', 'San Joaquin', 'Solano', 'Yolo'.
  counties.rad <- read_csv(
    "countiesRadius120mi.csv", 
    col_types = cols(
      state = col_skip(), city = col_skip(), distance.km = col_skip()
    )
  )                                     
  
  df <- left_join(df, counties.rad, by = "zip")
  
  local.adm.counties <- c(
    'El Dorado', 'Nevada', 'Placer', 'Sacramento', 'San Joaquin', 'Solano', 
    'Yolo'
  )
  
  # County will be NA if the zip code is not within 120 mile radius of 
  #   CSUS zip code(95819) 
  df <- df %>%
    mutate(
      adm.area = 
        if_else(!(county %in% local.adm.counties) | is.na(county), 
                         "nonlocal", "local")
    ) %>%
    mutate(sac.county.flg =
             if_else(!(county == "Sacramento") | is.na(county), 0, 1)
    ) %>%
    mutate(sac.county.flg = factor(sac.county.flg))

return(df)
}

# Extract prerequisite course grade ---------------------------------------------
get.prereq.grades <- function(course.df, df, prereq) {
  # Get each student's most recent grade in the prerequisite course
  course.stu <- course.df$emplid
  prereq.df <- df %>%
    select(emplid, course, course.seq, grd.pt.unt, grade) %>%
    filter(emplid %in% course.stu, course== prereq) %>% 
    group_by(emplid) %>%
    filter(course.seq == max(course.seq)) %>%
    rename(
      prereq.course.seq = course.seq, prereq.grd.pt.unt = grd.pt.unt, 
      prereq.grade = grade
    ) %>% 
    select(-course)
  
  dim(prereq.df) # [1] 275   6
  prereq.stu <- prereq.df$emplid
  
  course.df <- course.df %>%
    filter(emplid %in% prereq.stu)
  course.df <- left_join(course.df, prereq.df, by = "emplid")
  
  return(course.df)
}

# Get only the variables that have missing values ---------------------------------------------
get.missing.only <- function(course.df) {
  get.missing.only <- course.df %>% 
    summarise(across(everything(), ~ sum(is.na(.x)))) %>%
    gather() %>%
    filter(value != 0) 
  get.missing.only <- course.df %>%
    dplyr::select(all_of(get.missing.only$key)) 

  return(get.missing.only)
}

# Get imbalanced variables with SMD > 0.1------------------------------------
get.imbal.vars <- function(tab)
{
  get.imbal.vars <- as.data.frame(ExtractSmd(tab))
  get.imbal.vars <- get.imbal.vars %>%
    rownames_to_column(var = "Variable") %>%
    rename(`Before Matching SMD` = `1 vs 2`) %>%
    filter(`Before Matching SMD` > 0.1) %>% 
    arrange(desc(`Before Matching SMD`))
  get.imbal.vars <- kable(
    get.imbal.vars, caption = "Variables with SMD > 0.1"
    ) %>%
    kable_styling(full_width= F)
  
  return(get.imbal.vars)
}
# Unadjusted means -------------------------------------------------------------
get.unadj.means <- function(df.final)
{
  get.unadj.means <- df.final %>%
    group_by(palN) %>% summarise(unadj.means = mean(grd.pt.unt)) %>%
    pivot_wider(names_from = "palN", values_from = "unadj.means") %>%
    rename(`Non-PAL`= `0`, `PAL`= `1`) %>%
    mutate(Diff. = `PAL`-`Non-PAL`)
  
  get.unadj.means<- kable(
    get.unadj.means, caption = "Unadjusted Mean Grades"
    ) %>%
    kable_styling(full_width= F)
  
  return(get.unadj.means)
}
# Adjusted means  --------------------------------------------------------------
adj.means <- function(match.list, matched.dat) {
  get.adj.means <- matched.dat %>%
    group_by(palN) %>% 
    summarise(adj.means = weighted.mean(grd.pt.unt, match.list$weights)) %>%
    pivot_wider(names_from = "palN", values_from = "adj.means") %>%
    rename(`Non-PAL`= `0`, `PAL`= `1`) %>%
    mutate(Diff. = `PAL`-`Non-PAL`)
  
  # formatted table
  get.adj.means<- kable(get.adj.means, caption = "Adjusted Mean Grades") %>%
    kable_styling(full_width= F)
  
  return(get.adj.means)
}

# Match Table ------------------------------------------------------------------
create.match.tab <- function(matched.dat) {
  matched.dat <- matched.dat %>%
    mutate(pal = if_else(palN == 0, "Non-PAL", "PAL"))
  pal.flg <- c('Non-PAL', 'PAL')
  
  for (i in seq_along(pal.flg)) {
    multiple.matches <- matched.dat %>%
      filter(pal ==pal.flg[i]) %>%
      count(id) %>%
      filter(n> 1) %>%
      summarise(n())
    single.matches <- matched.dat %>%
      filter(pal == pal.flg[i]) %>%
      count(id) %>%
      filter(n==1) %>%
      summarise(n())
    if(pal.flg[i] == 'Non-PAL') {
      match.tab <- bind_rows(single.matches,  multiple.matches)
      match.tab <- match.tab %>%
        rename('Non-PAL'= 'n()')
    }
    pal.matches <- bind_rows(single.matches, multiple.matches)
    match.tab$PAL <- pal.matches$`n()`
    row.names(match.tab) <- c("Single Matches", "Multiple Matches")
  } 
  match.tab <-rbind(
    match.tab, "Total Students" = c(sum(match.tab$`Non-PAL`), sum(match.tab$`PAL`))
  )
  match.tab <- kable(match.tab, caption = "PAL and Non-PAL Matches") %>%
    kable_styling(full_width= F)
  
  return(match.tab)
}

# ATT plot ---------------------------------------------------------------------
# https://livefreeordichotomize.com/2019/01/17/understanding-propensity-score-weighting/
# https://www.csus.edu/brand/colors.html
get.att.plot <- function(df.final, match.list)
{
  df.final$p.score <- p.score
  
  df.final <- df.final %>%
    select(-id) %>%
    rownames_to_column(var = "id")

  ps.dat <- df.final %>%
    select(id, palN, p.score) %>%
    pivot_wider(
      names_from = "palN", values_from = "p.score", names_prefix = "b.pal."
    )
  before.match <- ps.dat %>%
    select(b.pal.0, b.pal.1)
  
  matched.dat <- df.final[unlist(match.list[c("index.treated", "index.control")]), ]
  matched.dat$match.weights<-  c(match.list$weights, match.list$weights)

  after.match <-matched.dat %>% 
    select(-id) %>%
    rownames_to_column(var = "id")
  after.match <- after.match %>%
    pivot_wider(names_from = "palN", values_from = "p.score", names_prefix = "pal.")
  after.match <- after.match %>%
    select(pal.0, pal.1, match.weights)
  
  get.att.plot <- ggplot() +
    geom_histogram(data = before.match, bins = 50, aes(b.pal.1), alpha = 0.5) + 
    geom_histogram(data = after.match,bins = 50, aes(pal.1, weight = match.weights), 
                   fill = "#043927", alpha = 0.5) + 
    geom_histogram(data = before.match, bins = 50, alpha = 0.5, 
                   aes(x = b.pal.0, y = -..count..)) + 
    geom_histogram(data = after.match, bins = 50, 
                   aes(x = pal.0, weight = match.weights, y = -..count..), 
                   fill = "#c4b581", alpha = 0.5) + 
    ylab("Count") + xlab("Propensity Scores") +
    geom_hline(yintercept = 0, lwd = 0.5) +
    scale_y_continuous(label = abs) 

return(get.att.plot)
}

# Variable Percent Improvement -------------------------------------------------
get.var.perc.tab <- function(list.bal) {
  get.var.perc.tab <- list.bal %>%
    pluck("Balance") %>%
    rownames_to_column("Variable") %>%
    dplyr::select("Variable", "Type", "Diff.Un","Diff.Adj") %>%
    mutate(`% Improvement` = if_else(Diff.Un == 0, 0, round(((abs(Diff.Un) - abs(Diff.Adj))/ abs(Diff.Un)) * 100 , 0))) %>%
    arrange(desc(`% Improvement`))
  get.var.perc.tab <- get.var.perc.tab %>% dplyr::select("Variable", "Diff.Un", "Diff.Adj", `% Improvement`)
  
  return(get.var.perc.tab)
}

# Covariate Balance Plots -------------------------------------------------------
# https://cran.r-project.org/web/packages/tableone/vignettes/smd.html
# https://www.csus.edu/brand/colors.html
get.bal.plot <- function(unmatched.tab, matched.tab) {
  ## Construct a data frame containing variable name and SMD from all methods
  dataPlot <- data.frame(variable  = rownames(ExtractSmd(unmatched.tab)),
                         Unmatched = as.numeric(ExtractSmd(unmatched.tab)),
                         Matched   = as.numeric(ExtractSmd(matched.tab))  )
  
  ## Create long-format data for ggplot2
  dataPlotMelt <- melt(data          = dataPlot,
                       id.vars       = c("variable"),
                       variable.name = "Method",
                       value.name    = "SMD")
  
  ## Order variable names by magnitude of SMD
  varNames <- as.character(dataPlot$variable)[order(dataPlot$Unmatched)]
  
  ## Order factor levels in the same order
  dataPlotMelt$variable <- factor(dataPlotMelt$variable,
                                  levels = varNames)
  
  ## Plot using ggplot2
  # Sac State colors and dashed line
  get.bal.plot <-ggplot(
    data = dataPlotMelt, mapping = 
      aes(x = variable, y = SMD, group = Method, color= Method)) +
    scale_color_manual(values = c("#043927", "#c4b581")) +
    geom_line(aes(linetype = Method)) +
    geom_point() +
    scale_linetype_manual(values= c("dashed", "solid")) +
    geom_hline(yintercept = 0.1, color = "black", size = 0.1) +
    coord_flip() +
    theme_bw() + theme(legend.key = element_blank())
  
  return(get.bal.plot)
}

# PAL Effect -------------------------------------------------------------------
get.pal.effect <- function(match.list, matched.dat, course) {  
 get.gamma <- psens(match.list, Gamma=2.0, GammaInc = 0.1)[["bounds"]] %>%
    filter(`Lower bound` < 0.05 & 0.05 < `Upper bound`) %>%
    slice_min(Gamma) %>% 
    select(Gamma) 
  
  get.pal.effect <-  matched.dat %>%
    group_by(palN) %>% 
    summarise(adj.means = weighted.mean(grd.pt.unt, match.list$weights)) %>%
    pivot_wider(names_from = "palN", values_from = "adj.means") %>%
    rename(`Non-PAL`= `0`, `PAL`= `1`) %>%
    mutate(Course= course, .before= "Non-PAL") %>%
    mutate(Diff. = `PAL`-`Non-PAL`) %>%
    mutate(`Std. error`= match.list$se, .after= "Diff.") %>%
    mutate(
      `p-val`= formatC( 1-pnorm(Diff./`Std. error`), format = "e", digits = 2), 
      Sensitivity= get.gamma$Gamma, 
      `N(non-PAL)`= length(unique(match.list$index.control)),
      `N(PAL)`= match.list$wnobs
    )

  return(get.pal.effect)
  }

Specialized functions for each course.

## BIO 22 ====================================================================
## Filter to relevant variables 
bio22.step.vars <- function(course.df) {
  vars.to.keep <- c(
    'acad.stand', 'adm.area', 'bot.level','cMaj', 'coh', 'course.age', 'course.count',
    'csus.gpa.start', 'cum.percent.units.passed', 'delay.from.hs', 'e.rmd', 
    'eth.erss', 'father.ed', 'fys.flg','gender', 'hous.coh.term.flg', 'hs.gpa',
    'Instructor_01', 'median.income','m.rmd', 'mother.ed', 'pct.female.head',
    'pell.coh.term.flg', 'prevPAL', 'prevPASS', 'reason',  'sac.county.flg', 
    'term.units.attemptedCensus', 'palN', 'grd.pt.unt', 'sat.math.score',
    'sat.math.flg', 'sat.verbal.score', 'sat.verbal.flg', 'AP_BIOL', 'AP_CALAB',    
    'AP_CALBC', 'AP_CHEM','AP_BIOL.flg', 'AP_CALAB.flg',    'AP_CALBC.flg', 
    'AP_CHEM.flg', 'pct.female.head.flg', 'med.inc.flg'
  )
  new.vars <- intersect(vars.to.keep, names(bio22.dat))
  bio22.final <- bio22.dat[ ,new.vars]
  
  return(bio22.final)
}

## Build a Logistic Regression Model for Propensity Score 
## Fit Propensity Score model (linear terms only)
bio22.step <- function(final.df) {
  # AP_CALAB 
  min.model <- glm(
    palN ~ cum.percent.units.passed + eth.erss + gender + sat.math.score +
      sat.verbal.score + sat.math.flg + AP_CALAB + AP_CALAB.flg, 
    data= bio22.final, family=binomial
  )
  summary(min.model)
  
  biggest <- formula(glm(palN ~. - grd.pt.unt,  data=bio22.final, family=binomial))
  bio22.step.first.order <- step(
    min.model, direction="forward", scope = biggest, trace=FALSE, k=2)
  summary(bio22.step.first.order)
  bio22.step.first.order$anova
  
  model.first.order <- formula(bio22.step.first.order)
  bio22.first.order.prop.model <- glm(
    model.first.order, data=bio22.final, family=binomial
  )
  
  return(bio22.first.order.prop.model)
}


## CHEM 1A ====================================================================
## Filter to relevant variables 
chem1a.step.vars <- function(course.df) {
  vars.to.keep <- c(
    'acad.stand', 'adm.area', 'bot.level','cMaj', 'coh', 'course.age',   
    'csus.gpa.start', 'cum.percent.units.passed', 'delay.from.hs', 'e.rmd',
    'eth.erss', 'father.ed','fys.flg','gender', 'hous.coh.term.flg', 'hs.gpa', 
    'Instructor_01', 'median.income','m.rmd', 'mother.ed', 'pct.female.head',
    'pell.coh.term.flg', 'prevPAL', 'prevPASS', 'reason',  'sac.county.flg',
    'term.units.attemptedCensus','palN', 'grd.pt.unt', 'sat.math.score', 
    'sat.math.flg', 'sat.verbal.score', 'sat.verbal.flg', 'AP_BIOL', 'AP_CALAB',
    'AP_CALBC', 'AP_CHEM','AP_BIOL.flg',    'AP_CALAB.flg', 'AP_CALBC.flg', 
    'AP_CHEM.flg', 'pct.female.head.flg', 'med.inc.flg'
  )
  new.vars <- intersect(vars.to.keep, names(chem1a.dat))
  chem1a.final <- chem1a.dat[ ,new.vars]
  
  return(chem1a.final)
}

## Build a Logistic Regression Model for Propensity Score 
## Fit Propensity Score model (linear terms only)
chem1a.step <- function(final.df) {
  # Stepwise selection selected AP_CALAB.flg, AP_BIOL.flg, AP_CHEM, and
  # AP_CHEM.flg
  min.model <- glm(
    palN ~ cum.percent.units.passed + eth.erss + gender + sat.math.score + 
      sat.verbal.score + sat.math.flg + AP_CALAB + AP_CALAB.flg + AP_BIOL +
      AP_BIOL.flg + AP_CHEM + AP_CHEM.flg, data= chem1a.final, family=binomial
  )
  summary(min.model)

  biggest <- formula(
    glm(palN ~. - grd.pt.unt, data=chem1a.final, family=binomial)
  )

  chem1a.step.first.order <- step(
    min.model, direction="forward", scope = biggest, trace=FALSE, k=2
  )
  summary(chem1a.step.first.order)
  chem1a.step.first.order$anova
  
  model.first.order <- formula(chem1a.step.first.order)
  chem1a.first.order.prop.model <- glm(
    model.first.order, data=chem1a.final, family=binomial
  )
  
  return(chem1a.first.order.prop.model)
}

## CHEM 1B ====================================================================
## Filter to relevant variables
chem1b.step.vars <- function(course.df) {
  vars.to.keep <- c(
    'acad.stand', 'adm.area', 'bot.level','cMaj', 'coh', 'course.age', 
    'csus.gpa.start', 'cum.percent.units.passed', 'delay.from.hs', 'e.rmd', 
    'eth.erss', 'father.ed', 'fys.flg','gender', 'hous.coh.term.flg', 'hs.gpa', 
    'Instructor_01','median.income','m.rmd', 'mother.ed', 'pct.female.head',
    'pell.coh.term.flg', 'prevPAL', 'prevPASS', 'reason',  'sac.county.flg', 
    'term.units.attemptedCensus', 'palN', 'grd.pt.unt', 'chem1a.grd.pt.unt',
    'AP_BIOL',  'AP_CALAB', 'AP_CALBC', 'AP_CHEM','AP_BIOL.flg', 'AP_CALAB.flg',
    'AP_CALBC.flg', 'AP_CHEM.flg', 'pct.female.head.flg', 'med.inc.flg'
  )
  new.vars <- intersect(vars.to.keep, names(chem1b.dat))
  chem1b.final <- chem1b.dat[ ,new.vars]

  return(chem1b.final)
}

## Build a Logistic Regression Model for Propensity Score
## Fit Propensity Score model (linear terms only)
chem1b.step <- function(final.df) {
  # Stepwise selection selected AP_BIOL.flg and AP_CHEM.flg
  # Removed AP_BIOL.flg. Then stepwise selection selected AP_CALAB.flg.
  # Removed AP_CALAB.flg and pct.female.head.flg
  min.model <- glm(
    palN ~ chem1a.grd.pt.unt + cum.percent.units.passed + eth.erss + gender +
      AP_CHEM + AP_CHEM.flg, data= chem1b.final, family=binomial
  )
  summary(min.model)

  biggest <- formula(
    glm(palN ~. - grd.pt.unt - AP_BIOL.flg - AP_CALAB.flg - pct.female.head.flg,  
        data=chem1b.final, family=binomial)
  )

  chem1b.step.first.order <- step(min.model,
                                  direction="forward",scope = biggest,
                                  trace=FALSE, k=2)
  summary(chem1b.step.first.order)
  chem1b.step.first.order$anova

  model.first.order <- formula(chem1b.step.first.order)
  chem1b.first.order.prop.model <- glm(model.first.order, data=chem1b.final, family=binomial)

  return(chem1b.first.order.prop.model)
}

## CHEM 4 ====================================================================
## Filter to relevant variables 
chem4.step.vars <- function(course.df)
{
  vars.to.keep <- c(
    'acad.stand', 'adm.area', 'bot.level','cMaj', 'coh', 'course.age', 
    'csus.gpa.start', 'cum.percent.units.passed', 'delay.from.hs', 'e.rmd',
    'eth.erss', 'father.ed','fys.flg','gender', 'hous.coh.term.flg', 'hs.gpa', 
    'Instructor_01','median.income','m.rmd', 'mother.ed', 'pct.female.head',
    'pell.coh.term.flg', 'prevPAL', 'prevPASS', 'reason',  'sac.county.flg', 
    'term.units.attemptedCensus', 'palN', 'grd.pt.unt','sat.math.score',
    'sat.math.flg', 'sat.verbal.score', 'sat.verbal.flg', 'AP_BIOL', 'AP_CALAB',
    'AP_CALBC', 'AP_CHEM','AP_BIOL.flg',    'AP_CALAB.flg', 'AP_CALBC.flg', 
    'AP_CHEM.flg', 'pct.female.head.flg', 'med.inc.flg'
  )
  
  new.vars <- intersect(vars.to.keep, names(chem4.dat))
  chem4.final <- chem4.dat[ ,new.vars]
  
  return(chem4.final)
}

## Build a Logistic Regression Model for Propensity Score 
## Fit Propensity Score model (linear terms only)
chem4.step <- function(final.df)
{
 # "AP_BIOL"
  min.model <- glm(
    palN ~ cum.percent.units.passed + eth.erss + gender+ sat.math.score + 
      sat.verbal.score+sat.math.flg + AP_CALAB+AP_CALAB.flg, data= chem4.final, 
    family=binomial
  )
  summary(min.model)

  biggest <- formula(
    glm(palN ~. - grd.pt.unt - AP_BIOL, data=chem4.final, family=binomial)
  )

  chem4.step.first.order <- step(
    min.model, direction="forward", scope = biggest, trace=FALSE, k=2)
  summary(chem4.step.first.order)
  chem4.step.first.order$anova
  
  model.first.order <- formula(chem4.step.first.order)
  chem4.first.order.prop.model <- glm(
    model.first.order, data=chem4.final, family=binomial
    )
  
  return(chem4.first.order.prop.model)
}

## CHEM 24 ====================================================================
## Filter to relevant variables 
chem24.step.vars <- function(course.df)
{
  vars.to.keep <- c(  
    'acad.stand', 'adm.area', 'bot.level','cMaj', 'coh', 'course.age', 
    'csus.gpa.start', 'cum.percent.units.passed', 'delay.from.hs', 'e.rmd', 
    'eth.erss', 'father.ed', 'fys.flg','gender', 'hous.coh.term.flg', 'hs.gpa', 
    'Instructor_01', 'median.income','m.rmd', 'mother.ed', 'pct.female.head',
    'pell.coh.term.flg', 'prevPAL', 'prevPASS', 'reason',  'sac.county.flg', 
    'term.units.attemptedCensus', 'palN', 'grd.pt.unt', 'chem1b.grd.pt.unt', 
    'chem1b.term.gpa', 'chem1b.units.attempted', 'AP_BIOL', 'AP_CALAB',
    'AP_CALBC', 'AP_CHEM','AP_BIOL.flg',    'AP_CALAB.flg', 'AP_CALBC.flg', 
    'AP_CHEM.flg', 'pct.female.head.flg', 'med.inc.flg'
  )
  new.vars <- intersect(vars.to.keep, names(chem24.dat))
  chem24.final <- chem24.dat[ ,new.vars]
  
  return(chem24.final)
}

## Build a Logistic Regression Model for Propensity Score 
## Fit Propensity Score model (linear terms only)
chem24.step <- function(final.df) {
  min.model <- glm(
    palN ~ chem1b.grd.pt.unt + cum.percent.units.passed + eth.erss + gender,
    data= chem24.final, family=binomial
  )
  summary(min.model)
  
  biggest <- formula(
    glm(palN ~.- grd.pt.unt - acad.stand - reason - pct.female.head.flg, 
        data=chem24.final, family=binomial)
  )
  
  chem24.step.first.order <- step(
    min.model, direction="forward", scope = biggest, trace=FALSE, k=2
    )
  summary(chem24.step.first.order)
  chem24.step.first.order$anova
  
  model.first.order <- formula(chem24.step.first.order)
  chem24.first.order.prop.model <- glm(
    model.first.order, data=chem24.final, family=binomial
  )
 
  return(chem24.first.order.prop.model)
}

Import the Data

Make sure the PAL data file is in the same directory as this R Markdown file.
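
An optional check before reading the file (a sketch, not part of the original script):

# Sketch: stop with a clear error if the data file is not in the working directory
stopifnot(file.exists("paldatafull_csv.rds"))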

PALdatafull <- read_rds("paldatafull_csv.rds")
dim(PALdatafull)
## [1] 1099371     174
sum(PALdatafull$grd.pt.unt)
## [1] 2237555

The file, which includes data through the Spring 2019 semester, has 1099371 rows and 174 columns. The sum of the grd.pt.unt column is 2237555.

Chemistry classes

Subset data for chemistry classes

chem.classes <- paste("CHEM", c(4, '1A', '1B', 24))
chem.dat <- PALdatafull %>%
  filter(base.time.course == 1, course %in% chem.classes) %>%
  mutate(course = factor(course, levels = chem.classes)) 
dim(chem.dat) #  18948   174
## [1] 18948   174
num.stu <- dim(chem.dat)[1]
num.vars <- dim(chem.dat)[2]

There are 18948 rows and 174 variables. Each row corresponds to one chemistry student, so there are 18948 chemistry students in total.

Update CHEM 24 Spring 2019 for chemistry data

There are 83 students with first attempts at Chem 24 in Spring 2019. Some of them are incorrectly labeled as non-PAL and need to be relabeled.

with(chem.dat %>% 
       filter(base.time.course == 1, pass.term.flg == "PASS Term",  course == "CHEM 24",  term == "Spring 2019", course.seq == 0), 
     table(palN))
## palN
##  0 
## 83
chem.dat <- update.chem24s19(chem.dat)

with(chem.dat %>% 
       filter(base.time.course == 1, pass.term.flg == "PASS Term",  course == "CHEM 24",  term == "Spring 2019", course.seq == 0), 
     table(palN))
## palN
##  0  1  2 
## 28  9 46
#  0  1  2 
# 28  9 46 

After relabeling, there are 28 non-PAL students, 9 incomplete PAL students, and 46 PAL students for Chem 24 Spring 2019.

Compare the mean GPA for PAL and non-PAL students by course without propensity score adjustment

The course.seq variable indicates how many times a student has taken a course prior to the current attempt. To filter on the first attempt at a course, we set course.seq to 0.

Note: Excludes incomplete PAL students

get.raw.tab(chem.classes, chem.dat)
Raw Comparison of PAL and non-PAL Grades (No Propensity Adjustment)
class     nonPALavg   PALavg   Diff   NonPAL_Num   PAL_Num   CompletePAL   TermPALStart
CHEM 4    2.03        2.39     0.36   1929         759       0.28          2123
CHEM 1A   1.63        2.04     0.41   1717         1055      0.37          2128
CHEM 1B   1.70        2.11     0.41   1090         769       0.40          2138
CHEM 24   1.63        2.06     0.43   224          177       0.43          2178

Data Cleaning & Feature Engineering

Create new variables.
delay.from.hs: delay since high school
cum.percent.units.passed: cumulative percent of units passed
cMaj: census majors without concentrations/specializations/tracks/etc.
county: which county the student lived in at the time of application to Sac State
sac.county.flg: did the student live in Sacramento county at the time of application to Sac State

Collapse sparse categories and perform other miscellaneous clean-up of the data. Sparse categories can cause complete separation in logistic regression and are only predictive for a few students.
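
As a minimal illustration of this kind of collapsing (the course-specific code later uses DataExplorer::group_category with a 9% threshold; the helper below is an assumed alternative using forcats):

# Sketch: lump factor levels that hold fewer than 9% of rows into an "OTHER" level
collapse.sparse <- function(x, threshold = 0.09) {
  forcats::fct_lump_prop(factor(x), prop = threshold, other_level = "OTHER")
}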

# Check how many students did not complete PAL
sum(chem.dat$palN==1) # 226 
## [1] 226
incl.pal.stu <- sum(chem.dat$palN==1)
chem.dat <- clean.data(chem.dat)
dim(chem.dat) # 18722   179
## [1] 18722   179

There were 226 chemistry students who did not complete PAL and were removed from the analysis. There are now 18722 chemistry students instead of 18948.

There were originally 174 variables in the data set; 5 variables were added, so there are now 179 variables in total.

CHEM 4

Executive Summary

Based on data for 759 PAL students and 1929 non-PAL students, the unadjusted, raw difference in average grade for PAL and non-PAL students was 0.36 on an A=4.0 grade scale. However, since students self-select into supplemental PAL instruction, it is possible that the resulting PAL and non-PAL groups were not balanced with respect to other characteristics which could impact course grade. For example, if students with better study habits tend to enroll in PAL, all else being equal, the PAL mean grade would be higher than the non-PAL mean grade even if PAL had no effect on course grade. Consequently, we also performed a propensity score analysis to adjust the estimated effect of PAL on course grade for potential self-selection biases.

After adjusting for self-selection bias, we found that PAL students earned an average grade \(0.44\pm 0.09\) higher than non-PAL students. A sensitivity analysis indicates that this analysis is moderately sensitive to unknown confounders. Although the data give us sufficient evidence to conclude that PAL increases students’ grades in Chem 4, the existence of an unknown confounder similar in magnitude to living in on-campus housing during their first year, ethnicity, or major would nullify that conclusion.

Detailed Summary

A propensity score analysis was conducted to assess the effect of PAL supplemental instruction on Chem 4 course grade. Propensity score adjustment was necessary since the data are observational and the characteristics of students who voluntarily enroll in PAL may differ in ways that may, independently of PAL, impact course grade compared to students who do not enroll in PAL. In propensity score analysis, variables related to both likelihood of PAL enrollment and course grade (confounders) are used in a logistic regression model to obtain a propensity score, which is a student’s likelihood of enrolling in PAL.
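
In symbols (a standard definition rather than anything specific to this report), the propensity score for a student with covariate vector \(x\) is

\[
e(x) = P(\text{PAL} = 1 \mid X = x), \qquad \log\frac{e(x)}{1-e(x)} = \beta_0 + \beta^{T} x,
\]

where the coefficients are estimated by logistic regression and \(e(x)\) is the fitted probability of enrolling in PAL.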

For Chem 4, 13 covariates were found to have a statistically significant relationship to likelihood of enrolling in PAL. Variables related to increased likelihood of enrolling were: female gender, lower SAT scores, lower AP Calculus exam scores, higher term units attempted, academic major, class level, and CSUS GPA at start of term.

Using the propensity score model, all students in the dataset, PAL and non-PAL, are assigned a propensity score. Then, each PAL student is matched to one or more non-PAL students who have similar propensity scores. After matching, the PAL and matched non-PAL groups are compared to determine if the distribution of each covariate is similar between the two groups. This is called a balance check. If the standardized difference between the non-PAL and PAL means is less than 0.10, then the strong criterion in Leite (2017, p. 10) is met for covariate balance. If the standardized difference is under 0.25, then a more lenient criterion is met. The highest absolute standardized mean difference in this analysis is 0.0794. Consequently, adequate balance appears to have been achieved.

The difference in the average grade for the matched PAL and non-PAL data is then calculated. The estimated increase in the mean grade of students in PAL over those not in PAL after correcting for self-selection biases is \(0.44\pm 0.09\) or between 0.35 and 0.53 on a 4.0 grade scale. This result is statistically significant with a P-value of \(2.19\times 10^{-7}\) and is based on 530 PAL students and 680 non-PAL students. For comparison, the non-propensity score adjusted difference in average grade for PAL and non-PAL students was 0.36.

The estimated PAL effect is based on the assumption that the propensity model includes all potential confounders for PAL enrollment and grade in Chem 4. However, it is possible that unknown confounders exist. A sensitivity analysis was conducted to determine how strong an unknown confounder must be to nullify the statistically significant PAL effect that was found in this analysis. The sensitivity analysis (Rosenbaum, 2002) indicated that an unknown confounder which increases the odds of being in PAL by more than 1.7 is enough to change the treatment effect from significant to non-significant. Inspection of the covariates in the estimated propensity model for Chem 4 indicates that if there is an unknown confounder that has an effect on the propensity score similar to the effect of class level, instructor, or cohort (Transfer or Native Freshmen) observed in this analysis, the PAL effect would become non-significant. Thus, this finding is sensitive to unknown confounders. It is possible a variable like the number of hours per week a student works (which is not in our dataset) is an unknown confounder which could reverse the statistical significance of this analysis.
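
The sensitivity bounds quoted above come from Rosenbaum bounds as implemented in rbounds::psens (see the get.pal.effect helper); a sketch, assuming the CHEM 4 match object match.chem4 created later in this report:

# Sketch: Rosenbaum sensitivity bounds for hidden bias up to Gamma = 2, in steps of 0.1
psens(match.chem4, Gamma = 2.0, GammaInc = 0.1)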

Additionally, a number of variables were removed from this analysis due to large amounts of missingness. Since all students who had missing information on any included covariate were eliminated from the analysis, a balance had to be struck between retaining a sufficiently large pool of PAL and non-PAL students and retaining a sufficient number of important covariates. Variables which were eliminated from this analysis had substantial missing data or were subjectively judged as unlikely to be confounding. The choices about which variables to retain resulted in the original pool of 759 PAL students in Chem 4 being reduced to 530. Also, 680 non-PAL students were selected out of 1929 original non-PAL students.

When a PAL student had more than one suitable match among the non-PAL students, all non-PAL students were taken as matches and weighted appropriately in the final estimated PAL effect. There were 1238 non-PAL matches. Of the 530 PAL students, 206 were matched one-to-one with non-PAL students and 324 were matched one-to-many with non-PAL students.

Extract CHEM 4 Data

The non-PAL and PAL groups include only students making their first attempt at CHEM 4. They also include students who previously participated in PAL and/or are currently in PAL for another course.

# Excludes course repeats
chem4.dat <- chem.dat %>%
  filter(course=="CHEM 4", pass.term.flg == "PASS Term", course.seq== 0) 
dim(chem4.dat) # 2688  179
## [1] 2688  179

There are 2688 first attempt CHEM 4 students.

Collapse ‘cMaj’ variable separately for each course since the amount of collapsing necessary will vary by course.

# Collapsed cMaj categories to Biology and Other majors at 0.09
with(chem4.dat, table(cMaj, palN))
##                                 palN
## cMaj                               0   1
##   Anthropology                     4   2
##   Art                              1   0
##   Biology                        590 324
##   Business                        14   2
##   Chemistry                      168  66
##   Child Devel/Early Childhood Ed   8   6
##   Civil Engineering              220  42
##   Communications                   5   1
##   Computer Engineering            21   8
##   Computer Science                17   7
##   Construction Management          2   2
##   Criminal Justice                16   8
##   Deaf Studies                     3   0
##   Economics                        1   0
##   Electrical Engineering          86  13
##   English                          3   3
##   Environmental Studies           24   6
##   Family & Consumer Sciences       1   0
##   Film                             1   0
##   Finance                          3   1
##   French                           1   0
##   Geology                         27   8
##   Gerontology                      2   0
##   Graphic Design                   3   0
##   Health Science                  17   6
##   History                          2   3
##   Humanities                       0   1
##   Interior Design                  1   0
##   International Business           1   2
##   Journalism                       1   1
##   Kinesiology/Physical Education 202  81
##   Liberal Studies                  8   0
##   Mathematics                      7   2
##   Mechanical Engineering         212  62
##   Music                            0   1
##   Nursing                         29  25
##   Nutrition                       89  35
##   Philosophy                       3   1
##   Photography                      0   1
##   Physical Science                 1   0
##   Physics                         13   3
##   Political Science                2   0
##   Psychology                      42   8
##   Recreation Administration        0   1
##   Social Science                   1   1
##   Social Work                      1   3
##   Sociology                        1   0
##   Spanish                          1   2
##   Speech Pathology                 8   0
##   Theatre Arts                     1   1
##   Undeclared                      61  20
chem4.dat <- group_category(data = chem4.dat, feature = "cMaj", threshold = 0.09,  update = TRUE)
with(chem4.dat, table(cMaj, palN))
##                                 palN
## cMaj                               0   1
##   Biology                        590 324
##   Chemistry                      168  66
##   Civil Engineering              220  42
##   Electrical Engineering          86  13
##   Environmental Studies           24   6
##   Geology                         27   8
##   Kinesiology/Physical Education 202  81
##   Mechanical Engineering         212  62
##   Nursing                         29  25
##   Nutrition                       89  35
##   OTHER                          179  69
##   Psychology                      42   8
##   Undeclared                      61  20

Analyze missingness

Remove variables having too many missing values in order to retain a larger pool of PAL and non-PAL students.
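
The missingness table below appears to come from DataExplorer::profile_missing. A sketch of the screen, assuming a 5% cutoff; the force-kept variables named below are hypothetical examples, since the retained variables are not listed in the text:

# Sketch (assumption): profile missingness, then drop variables missing in > 5%
# of rows except a few force-kept ones
missing.tab <- profile_missing(chem4.dat)
force.keep  <- c("sat.math.score", "sat.verbal.score")  # hypothetical examples
drop.vars   <- missing.tab %>%
  filter(pct_missing > 0.05, !feature %in% force.keep) %>%
  pull(feature) %>%
  as.character()
chem4.reduced <- chem4.dat %>% dplyr::select(-all_of(drop.vars))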

## [1] 40
##                        feature num_missing pct_missing
## 19               Instructor_02        2688  1.00000000
## 24                   deg.plan3        2688  1.00000000
## 25                   deg.plan4        2688  1.00000000
## 26                   deg.plan5        2688  1.00000000
## 27                   deg.plan6        2688  1.00000000
## 21             withdraw_reason        2671  0.99367560
## 23                   deg.plan2        2667  0.99218750
## 13                  trf.gpaADM        2330  0.86681548
## 4                  pledge.term        2157  0.80245536
## 29                grad.termERS        1738  0.64657738
## 22                   deg.plan1        1713  0.63727679
## 28                   grad.term        1713  0.63727679
## 30                         ttg        1713  0.63727679
## 1                fys.term.code        1579  0.58742560
## 2                      fys.grd        1579  0.58742560
## 3                  fys.rpt.flg        1579  0.58742560
## 20               treat.section        1367  0.50855655
## 15 ge.critical.thinking.status         813  0.30245536
## 17              ge.math.status         813  0.30245536
## 18         ge.oral.comm.status         813  0.30245536
## 16      ge.english.comp.status         812  0.30208333
## 14                  admit.term         790  0.29389881
## 33                plan.college         769  0.28608631
## 34           plan.college.desc         769  0.28608631
## 35                   plan.dept         769  0.28608631
## 36               plan.deptAbbr         769  0.28608631
## 37                 plan.degree         769  0.28608631
## 38                   plan.type         769  0.28608631
## 7               sat.math.score         519  0.19308036
## 8                 sat.math.flg         519  0.19308036
## 9             sat.verbal.score         519  0.19308036
## 10              sat.verbal.flg         519  0.19308036
## 11               sat.test.date         519  0.19308036
## 31      tot.passd.prgrss.start         419  0.15587798
## 32      tot.taken.prgrss.start         419  0.15587798
## 39    cum.percent.units.passed         419  0.15587798
## 40                      county         406  0.15104167
## 12                      hs.gpa         321  0.11941964
## 5                  m.rmd.admin         146  0.05431548
## 6           m.rmd.admin.detail         146  0.05431548

## [1] 2688  144

40 variables are missing in more than 5% of rows.
5 of those 40 variables were judged important and were force-included despite the missingness.
So, 35 variables were removed due to missingness, and there are now 144 variables instead of 179.

Subset on Complete Cases only in CHEM 4 Data

chem4.dat <- chem4.dat[complete.cases(chem4.dat), ]
dim(chem4.dat) # 1723  144
## [1] 1723  144

1723 out of 2688 students are kept.
965 students were removed because they were missing values on at least one retained variable.

single.vars <- chem4.dat %>%
  summarise(across(everything(), ~ n_distinct(.x))) %>%
  select_if(. == 1)

# Table of variables with single values
CreateTableOne(vars = names(single.vars), data = chem4.dat)
##                                            
##                                             Overall      
##   n                                         1723         
##   country = USA (%)                         1723 (100.0) 
##   career.course = UGRD (%)                  1723 (100.0) 
##   acad.prog.course = UGD (%)                1723 (100.0) 
##   course (%)                                             
##      CHEM 4                                 1723 (100.0) 
##      CHEM 1A                                   0 (  0.0) 
##      CHEM 1B                                   0 (  0.0) 
##      CHEM 24                                   0 (  0.0) 
##   component = LEC (%)                       1723 (100.0) 
##   units (mean (SD))                         3.00 (0.00)  
##   course.numeric (mean (SD))                4.00 (0.00)  
##   div = Lower Division (%)                  1723 (100.0) 
##   course.seq (mean (SD))                    0.00 (0.00)  
##   rpt.flg = First Attempt (%)               1723 (100.0) 
##   base.time.course (mean (SD))              1.00 (0.00)  
##   years (mean (SD))                         0.50 (0.00)  
##   enroll.status = Continuing (%)            1723 (100.0) 
##   withdraw_code = NWD (%)                   1723 (100.0) 
##   enrl.flg = Enrolled (%)                   1723 (100.0) 
##   enrl.flgERS = Enrolled (%)                1723 (100.0) 
##   rtn.flg = Retained (%)                    1723 (100.0) 
##   rtn.flgERS = Retained (%)                 1723 (100.0) 
##   pass.term.flg = PASS Term (%)             1723 (100.0) 
##   csus.gpa.start.flg = Not Missing (%)      1723 (100.0) 
##   higher.ed.gpa.start.flg = Not Missing (%) 1723 (100.0)
sum(single.vars) # 21
## [1] 21
# remove single-valued variables
chem4.dat<- chem4.dat %>%
  dplyr::select(-names(single.vars))
dim(chem4.dat) # 1723  123
## [1] 1723  123

123 out of 144 variables are kept.
21 variables were removed because they take only a single value.

Identify variables causing complete separation in logistic regression

# Remove non chem4 instructors
chem4.dat <- chem4.dat %>%
  droplevels(chem4.dat$Instructor_01)

# Combine sparse ethnicity categories to Other
chem4.dat <- chem4.dat %>%
  mutate(eth.erss = fct_collapse(eth.erss, `N.A/P.I` = c("Pacific Islander", "Native American")))
with(chem4.dat, table(eth.erss, palN))
##                    palN
## eth.erss              0   1
##   African American   70  32
##   Asian             361 132
##   Foreign            26   9
##   Hispanic          404 234
##   N.A/P.I            15  11
##   Two or More Races  64  31
##   Unknown            35   9
##   White             217  73
# Collapse sparse categories for acad.stand 
# Other:  Academic Dismissal, Academic Disqualification 
chem4.dat <- chem4.dat %>%
  mutate(acad.stand = fct_other(acad.stand, keep = c("Good Standing")))
with(chem4.dat, table(acad.stand, palN))
##                palN
## acad.stand         0    1
##   Good Standing 1076  498
##   Other          116   33

Filter to relevant variables

Subjective judgment was used to narrow the pool of variables down to those likely to be confounders. It is important to include all variables correlated with the outcome, even if it is uncertain whether they are related to the likelihood of enrolling in PAL, since this allows for a more precise estimate of the treatment effect.

chem4.final <- chem4.step.vars(chem4.dat)
kable(names(chem4.final))
x
acad.stand
adm.area
bot.level
cMaj
coh
course.age
csus.gpa.start
cum.percent.units.passed
delay.from.hs
e.rmd
eth.erss
father.ed
fys.flg
gender
hous.coh.term.flg
hs.gpa
Instructor_01
median.income
m.rmd
mother.ed
pct.female.head
pell.coh.term.flg
prevPAL
prevPASS
reason
sac.county.flg
term.units.attemptedCensus
palN
grd.pt.unt
sat.math.score
sat.math.flg
sat.verbal.score
AP_BIOL
AP_CALAB
AP_CALBC
AP_CHEM
AP_BIOL.flg
AP_CALAB.flg
AP_CALBC.flg
AP_CHEM.flg
pct.female.head.flg
med.inc.flg

Build a Logistic Regression Model for Propensity Score

We subjectively identified the following potential confounders for the model to retain: cum.percent.units.passed, gender, eth.erss, sat.math.score, sat.verbal.score, and sat.math.flg (same as sat.verbal.flg). Stepwise variable selection will be used to select which of the other variables currently in the PAL dataset to include in the propensity model.

chem4.first.order.prop.model <- chem4.step(chem4.final)

summary(chem4.first.order.prop.model)
## 
## Call:
## glm(formula = model.first.order, family = binomial, data = chem4.final)
## 
## Deviance Residuals: 
##     Min       1Q   Median       3Q      Max  
## -2.0928  -0.8582  -0.5962   1.0823   2.5189  
## 
## Coefficients:
##                                     Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                        -0.313546   0.984757  -0.318 0.750182    
## cum.percent.units.passed           -0.874892   0.563737  -1.552 0.120674    
## eth.erssAsian                      -0.183902   0.257877  -0.713 0.475761    
## eth.erssForeign                     0.097238   0.470616   0.207 0.836309    
## eth.erssHispanic                    0.389267   0.249464   1.560 0.118663    
## eth.erssN.A/P.I                     0.426014   0.492404   0.865 0.386944    
## eth.erssTwo or More Races           0.108771   0.329229   0.330 0.741113    
## eth.erssUnknown                    -0.448564   0.460291  -0.975 0.329796    
## eth.erssWhite                      -0.124588   0.276471  -0.451 0.652252    
## genderMale                         -0.363496   0.130878  -2.777 0.005480 ** 
## sat.math.score                     -0.002785   0.001077  -2.586 0.009715 ** 
## sat.verbal.score                   -0.001747   0.001014  -1.723 0.084824 .  
## sat.math.flgold                     0.392623   0.243203   1.614 0.106444    
## AP_CALAB                           -0.411235   0.206074  -1.996 0.045981 *  
## AP_CALAB.flgNot Missing            -0.664814   0.309910  -2.145 0.031938 *  
## term.units.attemptedCensus          0.147671   0.027876   5.298 1.17e-07 ***
## Instructor_012                     -0.330922   0.600790  -0.551 0.581763    
## Instructor_013                     -0.720154   0.559506  -1.287 0.198051    
## Instructor_016                     -0.893954   0.785129  -1.139 0.254867    
## Instructor_017                     -0.395600   0.747739  -0.529 0.596762    
## Instructor_0110                    -0.181681   0.617853  -0.294 0.768718    
## Instructor_0112                     0.293936   0.508475   0.578 0.563214    
## Instructor_0120                     0.615611   0.549709   1.120 0.262763    
## Instructor_0122                    -0.816065   0.636632  -1.282 0.199896    
## cMajChemistry                      -0.405909   0.229873  -1.766 0.077429 .  
## cMajCivil Engineering              -0.870867   0.239665  -3.634 0.000279 ***
## cMajElectrical Engineering         -0.787875   0.378779  -2.080 0.037522 *  
## cMajEnvironmental Studies           0.232229   0.636605   0.365 0.715266    
## cMajGeology                        -1.415123   0.793576  -1.783 0.074550 .  
## cMajKinesiology/Physical Education -0.436497   0.185440  -2.354 0.018580 *  
## cMajMechanical Engineering         -0.657946   0.229622  -2.865 0.004166 ** 
## cMajNursing                         0.367788   0.367903   1.000 0.317463    
## cMajNutrition                      -0.730409   0.297964  -2.451 0.014233 *  
## cMajOTHER                          -0.444793   0.221100  -2.012 0.044248 *  
## cMajPsychology                     -1.636895   0.571105  -2.866 0.004154 ** 
## cMajUndeclared                     -0.518958   0.307354  -1.688 0.091321 .  
## bot.levelJunior                     0.475672   0.194301   2.448 0.014360 *  
## bot.levelSenior                     0.636372   0.428091   1.487 0.137138    
## bot.levelSophomore                  0.515050   0.145909   3.530 0.000416 ***
## csus.gpa.start                      0.332728   0.123350   2.697 0.006988 ** 
## cohTransfers                       -0.679730   0.456568  -1.489 0.136545    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 2128.4  on 1722  degrees of freedom
## Residual deviance: 1892.6  on 1682  degrees of freedom
## AIC: 1974.6
## 
## Number of Fisher Scoring iterations: 4
p.score <- chem4.first.order.prop.model$fitted.values
chem4.covs <-  names(chem4.first.order.prop.model %>%  pluck("model") %>% dplyr::select(-palN)) 

Propensity Score Matching

Before matching

# Unadjusted mean grades
get.unadj.means(chem4.final)
Unadjusted Mean Grades
Non-PAL   PAL        Diff.
2.01552   2.343503   0.3279827
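
The covariate summary below for the unmatched data (also used for the SMD check that follows) is presumably produced with tableone::CreateTableOne, mirroring the matched-data call later; a sketch:

# Sketch (assumption): covariate summary with SMDs on the unmatched data
unmatched.tab <- CreateTableOne(vars = chem4.covs, strata = "palN",
                                data = chem4.final, smd = TRUE, test = FALSE)
print(unmatched.tab, smd = TRUE, noSpaces = TRUE)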
##                                         Stratified by palN
##                                          0              1              SMD   
##   n                                        1192            531               
##   cum.percent.units.passed (mean (SD))     0.87 (0.13)    0.85 (0.13)   0.125
##   eth.erss (%)                                                          0.251
##      African American                        70 ( 5.9)      32 ( 6.0)        
##      Asian                                  361 (30.3)     132 (24.9)        
##      Foreign                                 26 ( 2.2)       9 ( 1.7)        
##      Hispanic                               404 (33.9)     234 (44.1)        
##      N.A/P.I                                 15 ( 1.3)      11 ( 2.1)        
##      Two or More Races                       64 ( 5.4)      31 ( 5.8)        
##      Unknown                                 35 ( 2.9)       9 ( 1.7)        
##      White                                  217 (18.2)      73 (13.7)        
##   gender = Male (%)                         589 (49.4)     185 (34.8)   0.298
##   sat.math.score (mean (SD))             492.77 (71.49) 465.20 (69.12)  0.392
##   sat.verbal.score (mean (SD))           470.15 (73.09) 451.90 (72.72)  0.250
##   sat.math.flg = old (%)                   1042 (87.4)     495 (93.2)   0.197
##   AP_CALAB (mean (SD))                     2.51 (0.49)    2.51 (0.41)   0.003
##   AP_CALAB.flg = Not Missing (%)            148 (12.4)      41 ( 7.7)   0.156
##   term.units.attemptedCensus (mean (SD))  13.67 (2.19)   14.16 (2.23)   0.226
##   Instructor_01 (%)                                                     0.348
##      1                                       25 ( 2.1)       6 ( 1.1)        
##      2                                       43 ( 3.6)      11 ( 2.1)        
##      3                                      116 ( 9.7)      22 ( 4.1)        
##      6                                       19 ( 1.6)       4 ( 0.8)        
##      7                                       24 ( 2.0)       4 ( 0.8)        
##      10                                      37 ( 3.1)      12 ( 2.3)        
##      12                                     818 (68.6)     415 (78.2)        
##      20                                      69 ( 5.8)      48 ( 9.0)        
##      22                                      41 ( 3.4)       9 ( 1.7)        
##   cMaj (%)                                                              0.418
##      Biology                                361 (30.3)     232 (43.7)        
##      Chemistry                               85 ( 7.1)      36 ( 6.8)        
##      Civil Engineering                      139 (11.7)      31 ( 5.8)        
##      Electrical Engineering                  46 ( 3.9)      11 ( 2.1)        
##      Environmental Studies                    7 ( 0.6)       5 ( 0.9)        
##      Geology                                 15 ( 1.3)       2 ( 0.4)        
##      Kinesiology/Physical Education         142 (11.9)      70 (13.2)        
##      Mechanical Engineering                 150 (12.6)      39 ( 7.3)        
##      Nursing                                 18 ( 1.5)      18 ( 3.4)        
##      Nutrition                               52 ( 4.4)      20 ( 3.8)        
##      OTHER                                  102 ( 8.6)      45 ( 8.5)        
##      Psychology                              24 ( 2.0)       4 ( 0.8)        
##      Undeclared                              51 ( 4.3)      18 ( 3.4)        
##   bot.level (%)                                                         0.175
##      Freshman                               390 (32.7)     134 (25.2)        
##      Junior                                 210 (17.6)      98 (18.5)        
##      Senior                                  30 ( 2.5)      11 ( 2.1)        
##      Sophomore                              562 (47.1)     288 (54.2)        
##   csus.gpa.start (mean (SD))               2.94 (0.57)    3.01 (0.52)   0.129
##   coh = Transfers (%)                        33 ( 2.8)       7 ( 1.3)   0.103

Check how many variables have SMD > 0.1

addmargins(table(ExtractSmd(unmatched.tab) > 0.1))
## 
## FALSE  TRUE   Sum 
##     1    13    14
get.imbal.vars(unmatched.tab)
Variables with SMD > 0.1
Variable                     Before Matching SMD
cMaj                         0.4176251
sat.math.score               0.3920790
Instructor_01                0.3483413
gender                       0.2984060
eth.erss                     0.2508546
sat.verbal.score             0.2503217
term.units.attemptedCensus   0.2255303
sat.math.flg                 0.1972337
bot.level                    0.1752879
AP_CALAB.flg                 0.1564959
csus.gpa.start               0.1288580
cum.percent.units.passed     0.1248668
coh                          0.1026375

13 variables have SMD >0.1

Implement a propensity score matching method.

match.chem4 <- with(chem4.final, Match(
  Y=chem4.final$grd.pt.unt, Tr = chem4.final$palN, X = p.score, 
  BiasAdjust = F, estimand = "ATT",  M=1, caliper=0.25, replace = TRUE, ties = TRUE))
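
A quick way to see how many PAL students were matched one-to-one versus one-to-many (the counts quoted in the detailed summary) is to tabulate the treated indices in the Match object; a sketch:

# Sketch: index.treated repeats once per matched control, so tabulating it gives
# the number of non-PAL matches for each PAL student
matches.per.pal <- table(match.chem4$index.treated)
table(matches.per.pal == 1)  # TRUE = matched one-to-one, FALSE = one-to-many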

After matching

Standardized mean differences for continuous variables and categorical variables.

# Needed for match table
chem4.final <- chem4.final %>%
  rownames_to_column(var = "id")

# Matched data
chem4.matched.dat <- chem4.final[unlist(match.chem4[c("index.treated", "index.control")]), ]
chem4.matched.dat$match.weights<-  c(match.chem4$weights, match.chem4$weights)

# Add match weights to match data
weighted.dat<-svydesign(id=~1,weights=~match.weights, data = chem4.matched.dat)
# Variable Summary Table for matched data with match weights
matched.tab <-svyCreateTableOne(vars = chem4.covs,  strata = "palN", data= weighted.dat, smd = TRUE, test = FALSE)
print(matched.tab, smd = TRUE, noSpaces = TRUE)
##                                         Stratified by palN
##                                          0              1              SMD   
##   n                                      530.00         530.00               
##   cum.percent.units.passed (mean (SD))   0.85 (0.13)    0.85 (0.13)    0.019 
##   eth.erss (%)                                                         0.147 
##      African American                    26.1 (4.9)     32.0 (6.0)           
##      Asian                               125.3 (23.6)   132.0 (24.9)         
##      Foreign                             11.8 (2.2)     9.0 (1.7)            
##      Hispanic                            243.9 (46.0)   233.0 (44.0)         
##      N.A/P.I                             4.4 (0.8)      11.0 (2.1)           
##      Two or More Races                   25.8 (4.9)     31.0 (5.8)           
##      Unknown                             12.7 (2.4)     9.0 (1.7)            
##      White                               80.1 (15.1)    73.0 (13.8)          
##   gender = Male (%)                      191.2 (36.1)   185.0 (34.9)   0.025 
##   sat.math.score (mean (SD))             465.40 (70.72) 465.40 (69.00) <0.001
##   sat.verbal.score (mean (SD))           453.36 (69.57) 452.25 (72.31) 0.016 
##   sat.math.flg = old (%)                 489.5 (92.3)   494.0 (93.2)   0.033 
##   AP_CALAB (mean (SD))                   2.51 (0.40)    2.51 (0.41)    0.004 
##   AP_CALAB.flg = Not Missing (%)         45.6 (8.6)     41.0 (7.7)     0.032 
##   term.units.attemptedCensus (mean (SD)) 14.19 (2.18)   14.16 (2.23)   0.014 
##   Instructor_01 (%)                                                    0.146 
##      1                                   11.7 (2.2)     6.0 (1.1)            
##      2                                   8.5 (1.6)      11.0 (2.1)           
##      3                                   20.7 (3.9)     22.0 (4.2)           
##      6                                   7.0 (1.3)      4.0 (0.8)            
##      7                                   7.1 (1.3)      4.0 (0.8)            
##      10                                  12.5 (2.4)     12.0 (2.3)           
##      12                                  418.0 (78.9)   414.0 (78.1)         
##      20                                  38.9 (7.3)     48.0 (9.1)           
##      22                                  5.6 (1.1)      9.0 (1.7)            
##   cMaj (%)                                                             0.134 
##      Biology                             238.6 (45.0)   231.0 (43.6)         
##      Chemistry                           33.2 (6.3)     36.0 (6.8)           
##      Civil Engineering                   25.4 (4.8)     31.0 (5.8)           
##      Electrical Engineering              16.4 (3.1)     11.0 (2.1)           
##      Environmental Studies               6.6 (1.2)      5.0 (0.9)            
##      Geology                             4.0 (0.7)      2.0 (0.4)            
##      Kinesiology/Physical Education      64.3 (12.1)    70.0 (13.2)          
##      Mechanical Engineering              35.3 (6.7)     39.0 (7.4)           
##      Nursing                             18.2 (3.4)     18.0 (3.4)           
##      Nutrition                           16.9 (3.2)     20.0 (3.8)           
##      OTHER                               48.4 (9.1)     45.0 (8.5)           
##      Psychology                          1.7 (0.3)      4.0 (0.8)            
##      Undeclared                          21.0 (4.0)     18.0 (3.4)           
##   bot.level (%)                                                        0.074 
##      Freshman                            119.0 (22.5)   134.0 (25.3)         
##      Junior                              95.6 (18.0)    98.0 (18.5)          
##      Senior                              12.6 (2.4)     11.0 (2.1)           
##      Sophomore                           302.7 (57.1)   287.0 (54.2)         
##   csus.gpa.start (mean (SD))             2.97 (0.50)    3.01 (0.52)    0.081 
##   coh = Transfers (%)                    5.7 (1.1)      7.0 (1.3)      0.023

Balance Check

Continuous variables: standardized mean differences are computed using the standard deviation of the treated group. Binary variables: raw differences in proportion.

After matching, all variables are balanced, with standardized mean differences below the 0.1 threshold.
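As a point of reference, here is a minimal sketch of the two formulas just described (illustrative helper functions, not part of the report's own code):

# Illustrative only: SMD for a single covariate, with `treated` and `control`
# holding that covariate's values in each group
smd.continuous <- function(treated, control) {
  (mean(treated, na.rm = TRUE) - mean(control, na.rm = TRUE)) /
    sd(treated, na.rm = TRUE)                    # denominator: SD of the treated group
}
smd.binary <- function(treated, control) {
  mean(treated, na.rm = TRUE) - mean(control, na.rm = TRUE)  # raw difference in proportions
}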

chem4.bal <- bal.tab(match.chem4, formula = f.build("palN", chem4.covs), data = chem4.final,
        distance = ~ p.score, thresholds = c(m = .1), un = TRUE, imbalanced.only = TRUE)
chem4.bal
## Balance Measures
## All covariates are balanced.
## 
## Balance tally for mean differences
##                    count
## Balanced, <0.1        45
## Not Balanced, >0.1     0
## 
## Variable with the greatest mean difference
##        Variable Diff.Adj    M.Threshold
##  csus.gpa.start   0.0794 Balanced, <0.1
## 
## Sample sizes
##                      Control Treated
## All                  1192.       531
## Matched (ESS)         318.44     530
## Matched (Unweighted)  680.       530
## Unmatched             512.         0
## Discarded               0.         1

Check variable percent improvement

get.var.perc.tab(chem4.bal)
##                               Variable       Diff.Un      Diff.Adj % Improvement
## 1                              p.score  0.7912852849  2.660153e-04           100
## 2                       sat.math.score -0.3988551284 -9.585924e-05           100
## 3                         cMaj_Nursing  0.0187976339 -3.773585e-04            98
## 4                      Instructor_01_3 -0.0558841745  2.493261e-03            96
## 5                     sat.verbal.score -0.2509643508 -1.536474e-02            94
## 6           term.units.attemptedCensus  0.2235767269 -1.400638e-02            94
## 7                          gender_Male -0.1457282701 -1.173405e-02            92
## 8                     Instructor_01_12  0.0953026454 -7.520216e-03            92
## 9                         cMaj_Biology  0.1340591388 -1.434187e-02            89
## 10                    Instructor_01_10 -0.0084413984 -1.013028e-03            88
## 11         cMaj_Mechanical Engineering -0.0523925985  6.889039e-03            87
## 12            cum.percent.units.passed -0.1231213537  1.902176e-02            85
## 13                    sat.math.flg_old  0.0580423160  8.580413e-03            85
## 14                       coh_Transfers -0.0145018896  2.493261e-03            83
## 15              cMaj_Civil Engineering -0.0582303239  1.052785e-02            82
## 16            AP_CALAB.flg_Not Missing -0.0469482678 -8.728661e-03            81
## 17                   eth.erss_Hispanic  0.1017517916 -2.060872e-02            80
## 18                      eth.erss_Asian -0.0542647784  1.268643e-02            77
## 19                      eth.erss_White -0.0445705204 -1.332210e-02            70
## 20                     Instructor_01_2 -0.0153581946  4.683288e-03            70
## 21                     cMaj_Psychology -0.0126012715  4.429470e-03            65
## 22                    Instructor_01_22 -0.0174468206  6.331986e-03            64
## 23                  bot.level_Freshman -0.0748271591  2.823001e-02            62
## 24                        cMaj_Geology -0.0088174143 -3.679245e-03            58
## 25                 bot.level_Sophomore  0.0708963713 -2.965633e-02            58
## 26                     Instructor_01_7 -0.0126012715 -5.864780e-03            53
## 27                    Instructor_01_20  0.0325095742  1.725966e-02            47
## 28                    bot.level_Junior  0.0083829422  4.451932e-03            47
## 29                    eth.erss_Unknown -0.0124132636 -6.951932e-03            44
## 30         cMaj_Electrical Engineering -0.0178749731 -1.019317e-02            43
## 31                      csus.gpa.start  0.1357473318  7.943121e-02            41
## 32                     cMaj_Undeclared -0.0088869298 -5.754717e-03            35
## 33                     Instructor_01_6 -0.0084066406 -5.566038e-03            34
## 34                    bot.level_Senior -0.0044521543 -3.025606e-03            32
## 35          cMaj_Environmental Studies  0.0035437126 -2.955975e-03            17
## 36 cMaj_Kinesiology/Physical Education  0.0126992252  1.075921e-02            15
## 37                      cMaj_Nutrition -0.0059593776  5.824349e-03             2
## 38                            AP_CALAB  0.0034431498 -3.675201e-03            -7
## 39                    eth.erss_Foreign -0.0048629280 -5.332435e-03           -10
## 40                     Instructor_01_1 -0.0096737193 -1.080413e-02           -12
## 41                      cMaj_Chemistry -0.0035121147  5.370620e-03           -53
## 42                    eth.erss_N.A/P.I  0.0081317383  1.250000e-02           -54
## 43          eth.erss_Two or More Races  0.0046891391  9.838275e-03          -110
## 44           eth.erss_African American  0.0015388213  1.119048e-02          -627
## 45                          cMaj_OTHER -0.0008247071 -6.498203e-03          -688

Check covariate balance visually

get.bal.plot(unmatched.tab, matched.tab)

love.plot(chem4.bal,binary = "raw", stars = "std", var.order = "unadjusted", 
            thresholds = c(m = .1), abs = F)

Compare single and multiple matches for PAL and non-PAL

create.match.tab(chem4.matched.dat)
PAL and Non-PAL Matches

                   Non-PAL   PAL
Single Matches         343   206
Multiple Matches       337   324
Total Students         680   530

Out of 531 PAL students, 530 were matched and 1 could not be matched. Of the 530 matched PAL students, 206 were matched to a single non-PAL student and 324 were matched to multiple non-PAL students.

The 1238 matched pairs draw on 680 distinct non-PAL students: 343 of these non-PAL students were matched to exactly one PAL student and 337 were matched to multiple PAL students.
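For reference, these counts can be recovered directly from the Match object, since each matched pair contributes one entry to index.treated and index.control (a sketch, not the create.match.tab helper itself):

# Non-PAL side: how many PAL students each control is matched to
ctrl.use <- table(match.chem4$index.control)
c(single = sum(ctrl.use == 1), multiple = sum(ctrl.use > 1), total = length(ctrl.use))

# PAL side: how many non-PAL students each treated student is matched to
trt.use <- table(match.chem4$index.treated)
c(single = sum(trt.use == 1), multiple = sum(trt.use > 1), total = length(trt.use))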

Plot of Propensity Scores for Average Treatment Effect Among the Treated (ATT)

get.att.plot(chem4.final, match.chem4)

Assess balance with prognostic score

The standardized mean difference of the prognostic score is 0.0900, which indicates balance. All variables are under the 0.1 mean difference threshold. Since the estimated prognostic score is balanced, the effect estimate is likely to be relatively unbiased.
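The code that produced the output below is not echoed in the report. A minimal sketch of how such a prognostic-score check can be carried out, assuming ctrl.data is the non-PAL subset and palN is the 0/1 treatment indicator used in Match:

# Fit the prognostic (outcome) model on non-PAL students only
ctrl.data <- filter(chem4.final, palN == 0)
prog.model <- glm(f.build("grd.pt.unt", chem4.covs), data = ctrl.data)
summary(prog.model)

# Score all students, then check post-matching balance on the prognostic score
chem4.final$prog.score <- predict(prog.model, newdata = chem4.final)
bal.tab(match.chem4, formula = f.build("palN", chem4.covs), data = chem4.final,
        distance = ~ prog.score + p.score, thresholds = c(m = .1))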

## 
## Call:
## glm(formula = f.build("grd.pt.unt", chem4.covs), data = ctrl.data)
## 
## Deviance Residuals: 
##      Min        1Q    Median        3Q       Max  
## -3.09610  -0.71453   0.09259   0.72726   2.81319  
## 
## Coefficients:
##                                      Estimate Std. Error t value Pr(>|t|)    
## (Intercept)                        -3.1551135  0.4687028  -6.732 2.64e-11 ***
## cum.percent.units.passed            0.5280403  0.3058402   1.727  0.08452 .  
## eth.erssAsian                       0.6156161  0.1398936   4.401 1.18e-05 ***
## eth.erssForeign                     0.4445376  0.2444358   1.819  0.06923 .  
## eth.erssHispanic                    0.4145461  0.1384392   2.994  0.00281 ** 
## eth.erssN.A/P.I                     1.1880648  0.3029530   3.922 9.32e-05 ***
## eth.erssTwo or More Races           0.5392068  0.1848077   2.918  0.00360 ** 
## eth.erssUnknown                     0.6844153  0.2210742   3.096  0.00201 ** 
## eth.erssWhite                       0.4857870  0.1486030   3.269  0.00111 ** 
## genderMale                         -0.0452349  0.0698444  -0.648  0.51734    
## sat.math.score                      0.0034058  0.0005773   5.900 4.78e-09 ***
## sat.verbal.score                   -0.0008005  0.0005374  -1.490  0.13660    
## sat.math.flgold                    -0.0266766  0.1141820  -0.234  0.81531    
## AP_CALAB                            0.0838340  0.0740435   1.132  0.25777    
## AP_CALAB.flgNot Missing             0.3171721  0.1142736   2.776  0.00560 ** 
## term.units.attemptedCensus         -0.0210416  0.0148051  -1.421  0.15552    
## Instructor_012                      0.6256332  0.2704276   2.313  0.02087 *  
## Instructor_013                     -0.0692320  0.2442575  -0.283  0.77689    
## Instructor_016                     -0.3435549  0.3370825  -1.019  0.30832    
## Instructor_017                      0.2694129  0.3038283   0.887  0.37541    
## Instructor_0110                    -0.0389665  0.2877060  -0.135  0.89229    
## Instructor_0112                     0.1022879  0.2288037   0.447  0.65492    
## Instructor_0120                    -0.0277218  0.2644355  -0.105  0.91653    
## Instructor_0122                     0.0430879  0.2833216   0.152  0.87915    
## cMajChemistry                       0.1952620  0.1279747   1.526  0.12734    
## cMajCivil Engineering               0.1616398  0.1126137   1.435  0.15146    
## cMajElectrical Engineering          0.0486928  0.1720735   0.283  0.77725    
## cMajEnvironmental Studies          -0.8540900  0.4077684  -2.095  0.03643 *  
## cMajGeology                        -0.5551327  0.2853390  -1.946  0.05196 .  
## cMajKinesiology/Physical Education  0.1843223  0.1093237   1.686  0.09206 .  
## cMajMechanical Engineering         -0.1065179  0.1122961  -0.949  0.34305    
## cMajNursing                        -0.5533050  0.2563063  -2.159  0.03107 *  
## cMajNutrition                      -0.0422353  0.1648899  -0.256  0.79789    
## cMajOTHER                           0.0852959  0.1240686   0.687  0.49191    
## cMajPsychology                      0.0750013  0.2259877   0.332  0.74004    
## cMajUndeclared                      0.0353021  0.1595944   0.221  0.82498    
## bot.levelJunior                     0.0487889  0.1048907   0.465  0.64192    
## bot.levelSenior                     0.0325395  0.2267044   0.144  0.88589    
## bot.levelSophomore                 -0.1937920  0.0758361  -2.555  0.01073 *  
## csus.gpa.start                      1.0094300  0.0660161  15.291  < 2e-16 ***
## cohTransfers                        0.0724838  0.2024442   0.358  0.72038    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for gaussian family taken to be 1.095629)
## 
##     Null deviance: 1984.6  on 1191  degrees of freedom
## Residual deviance: 1261.1  on 1151  degrees of freedom
## AIC: 3533.9
## 
## Number of Fisher Scoring iterations: 2
## Balance Measures
##                                         Type Diff.Adj    M.Threshold
## prog.score                          Distance   0.0900 Balanced, <0.1
## cum.percent.units.passed             Contin.   0.0190 Balanced, <0.1
## eth.erss_African American             Binary   0.0112 Balanced, <0.1
## eth.erss_Asian                        Binary   0.0127 Balanced, <0.1
## eth.erss_Foreign                      Binary  -0.0053 Balanced, <0.1
## eth.erss_Hispanic                     Binary  -0.0206 Balanced, <0.1
## eth.erss_N.A/P.I                      Binary   0.0125 Balanced, <0.1
## eth.erss_Two or More Races            Binary   0.0098 Balanced, <0.1
## eth.erss_Unknown                      Binary  -0.0070 Balanced, <0.1
## eth.erss_White                        Binary  -0.0133 Balanced, <0.1
## gender_Male                           Binary  -0.0117 Balanced, <0.1
## sat.math.score                       Contin.  -0.0001 Balanced, <0.1
## sat.verbal.score                     Contin.  -0.0154 Balanced, <0.1
## sat.math.flg_old                      Binary   0.0086 Balanced, <0.1
## AP_CALAB                             Contin.  -0.0037 Balanced, <0.1
## AP_CALAB.flg_Not Missing              Binary  -0.0087 Balanced, <0.1
## term.units.attemptedCensus           Contin.  -0.0140 Balanced, <0.1
## Instructor_01_1                       Binary  -0.0108 Balanced, <0.1
## Instructor_01_2                       Binary   0.0047 Balanced, <0.1
## Instructor_01_3                       Binary   0.0025 Balanced, <0.1
## Instructor_01_6                       Binary  -0.0056 Balanced, <0.1
## Instructor_01_7                       Binary  -0.0059 Balanced, <0.1
## Instructor_01_10                      Binary  -0.0010 Balanced, <0.1
## Instructor_01_12                      Binary  -0.0075 Balanced, <0.1
## Instructor_01_20                      Binary   0.0173 Balanced, <0.1
## Instructor_01_22                      Binary   0.0063 Balanced, <0.1
## cMaj_Biology                          Binary  -0.0143 Balanced, <0.1
## cMaj_Chemistry                        Binary   0.0054 Balanced, <0.1
## cMaj_Civil Engineering                Binary   0.0105 Balanced, <0.1
## cMaj_Electrical Engineering           Binary  -0.0102 Balanced, <0.1
## cMaj_Environmental Studies            Binary  -0.0030 Balanced, <0.1
## cMaj_Geology                          Binary  -0.0037 Balanced, <0.1
## cMaj_Kinesiology/Physical Education   Binary   0.0108 Balanced, <0.1
## cMaj_Mechanical Engineering           Binary   0.0069 Balanced, <0.1
## cMaj_Nursing                          Binary  -0.0004 Balanced, <0.1
## cMaj_Nutrition                        Binary   0.0058 Balanced, <0.1
## cMaj_OTHER                            Binary  -0.0065 Balanced, <0.1
## cMaj_Psychology                       Binary   0.0044 Balanced, <0.1
## cMaj_Undeclared                       Binary  -0.0058 Balanced, <0.1
## bot.level_Freshman                    Binary   0.0282 Balanced, <0.1
## bot.level_Junior                      Binary   0.0045 Balanced, <0.1
## bot.level_Senior                      Binary  -0.0030 Balanced, <0.1
## bot.level_Sophomore                   Binary  -0.0297 Balanced, <0.1
## csus.gpa.start                       Contin.   0.0794 Balanced, <0.1
## coh_Transfers                         Binary   0.0025 Balanced, <0.1
## p.score                              Contin.   0.0003 Balanced, <0.1
## 
## Balance tally for mean differences
##                    count
## Balanced, <0.1        46
## Not Balanced, >0.1     0
## 
## Variable with the greatest mean difference
##        Variable Diff.Adj    M.Threshold
##  csus.gpa.start   0.0794 Balanced, <0.1
## 
## Sample sizes
##                      Control Treated
## All                  1192.       531
## Matched (ESS)         318.44     530
## Matched (Unweighted)  680.       530
## Unmatched             512.         0
## Discarded               0.         1

Estimate the Difference Between the Mean Grade in CHEM 4 of PAL and non-PAL Students

The estimated increase in the mean grade of students in PAL over those not in PAL, after correcting for self-selection bias, is 0.44347. This result is statistically significant, with a p-value of \(4.3873 \times 10^{-7}\), and is based on 530 PAL students and 1238 matched pairs drawing on 680 distinct non-PAL students. Note that this p-value is for a two-tailed test; it is halved to give the one-tailed p-value in the final table summarizing the effect of PAL across chemistry courses.

summary(match.chem4)
## 
## Estimate...  0.44347 
## AI SE......  0.087792 
## T-stat.....  5.0513 
## p.val......  4.3873e-07 
## 
## Original number of observations..............  1723 
## Original number of treated obs...............  531 
## Matched number of observations...............  530 
## Matched number of observations  (unweighted).  1238 
## 
## Caliper (SDs)........................................   0.25 
## Number of obs dropped by 'exact' or 'caliper'  1
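As noted above, the final summary table reports a one-tailed p-value, obtained by halving the two-tailed value shown here (numbers copied from the output for illustration):

# Convert the two-tailed p-value from summary(match.chem4) to a one-tailed p-value
p.two.tailed <- 4.3873e-07
p.one.tailed <- p.two.tailed / 2   # about 2.19e-07, as reported in the final table
p.one.tailed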

Sensitivity Analysis

# Rosenbaum bounds: hidden-bias odds ratio Gamma from 1 to 2.5 in increments of 0.1
psens(match.chem4, Gamma = 2.5, GammaInc = 0.1)
## 
##  Rosenbaum Sensitivity Test for Wilcoxon Signed Rank P-Value 
##  
## Unconfounded estimate ....  0 
## 
##  Gamma Lower bound Upper bound
##    1.0           0      0.0000
##    1.1           0      0.0000
##    1.2           0      0.0000
##    1.3           0      0.0000
##    1.4           0      0.0001
##    1.5           0      0.0019
##    1.6           0      0.0228
##    1.7           0      0.1219
##    1.8           0      0.3517
##    1.9           0      0.6410
##    2.0           0      0.8566
##    2.1           0      0.9587
##    2.2           0      0.9912
##    2.3           0      0.9986
##    2.4           0      0.9998
##    2.5           0      1.0000
## 
##  Note: Gamma is Odds of Differential Assignment To
##  Treatment Due to Unobserved Factors 
## 

Note that in the table above, \(\Gamma = 1.7\) is the first row in which 0.05 falls between the lower and upper bounds. This means that an unobserved confounder that increases the odds of participating in PAL by a factor of more than 1.7 would be enough to change the treatment effect from significant to non-significant. The next code block shows the effect of each variable in the propensity score model on the odds of PAL participation. If an unobserved confounder affected the propensity score as strongly as “cMaj” or “coh”, the PAL effect would become non-significant, so this finding is sensitive to unobserved confounding. For example, a variable not in our data set, such as the number of hours per week a student works, could be a confounder strong enough to reverse the statistical significance of this analysis.

kable(sort(exp(abs(chem4.first.order.prop.model$coefficients))))
Variable                               exp(|coefficient|)
sat.verbal.score 1.001748
sat.math.score 1.002788
eth.erssForeign 1.102122
eth.erssTwo or More Races 1.114907
eth.erssWhite 1.132682
term.units.attemptedCensus 1.159132
Instructor_0110 1.199232
eth.erssAsian 1.201897
cMajEnvironmental Studies 1.261409
Instructor_0112 1.341698
(Intercept) 1.368269
Instructor_012 1.392251
csus.gpa.start 1.394769
genderMale 1.438349
cMajNursing 1.444535
eth.erssHispanic 1.475899
sat.math.flgold 1.480860
Instructor_017 1.485276
cMajChemistry 1.500667
AP_CALAB 1.508679
eth.erssN.A/P.I 1.531142
cMajKinesiology/Physical Education 1.547277
cMajOTHER 1.560167
eth.erssUnknown 1.566062
bot.levelJunior 1.609096
bot.levelSophomore 1.673723
cMajUndeclared 1.680276
Instructor_0120 1.850787
bot.levelSenior 1.889612
cMajMechanical Engineering 1.930822
AP_CALAB.flgNot Missing 1.944130
cohTransfers 1.973346
Instructor_013 2.054749
cMajNutrition 2.075929
cMajElectrical Engineering 2.198720
Instructor_0122 2.261584
cMajCivil Engineering 2.388982
cum.percent.units.passed 2.398617
Instructor_016 2.444778
cMajGeology 4.116994
cMajPsychology 5.139186

Propensity Score Adjusted Mean Grades

Adjusted Mean Grades

Course   Non-PAL   PAL    Diff.   Std. error   p-val      Sensitivity   N(non-PAL)   N(PAL)
CHEM 4   1.9       2.35   0.44    0.09         2.19e-07   1.7           680          530
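As a cross-check, the adjusted group means can be approximated as match-weighted means of the CHEM 4 grade in the matched sample, using the survey design object built earlier (a sketch; the helper that assembles the table above is not shown):

# Match-weighted mean grade by PAL status (approximates the Non-PAL and PAL columns above)
svyby(~grd.pt.unt, ~palN, design = weighted.dat, FUN = svymean)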

References

Greifer, Noah. 2020. Cobalt: Covariate Balance Tables and Plots. https://CRAN.R-project.org/package=cobalt.

Leite, W. L. 2017. Practical Propensity Score Methods Using R. Thousand Oaks, CA: Sage Publishing. https://osf.io/nygb5/.

Sekhon, Jasjeet S. 2011. “Multivariate and Propensity Score Matching Software with Automated Balance Optimization: The Matching Package for R.” Journal of Statistical Software 42 (7): 1–52. http://www.jstatsoft.org/v42/i07/.

Yoshida, Kazuki, and Alexander Bartel. 2020. Tableone: Create ’Table 1’ to Describe Baseline Characteristics with or Without Propensity Score Weights. https://CRAN.R-project.org/package=tableone.

Zhang, Z., H. J. Kim, G. Lonjon, Y. Zhu, and written on behalf of AME Big-Data Clinical Trial Collaborative Group. 2019. “Balance Diagnostics After Propensity Score Matching.” Annals of Translational Medicine 7 (1): 16. https://doi.org/10.21037/atm.2018.12.10.