bonus.Rmd
library(dml.sensemakr)
#> See details in:
#> - Chernozhukov, V., Cinelli, C., Newey, W., Sharma, A., & Syrgkanis, V. (2021). Long Story Short: Omitted Variable Bias in Causal Machine Learning. National Bureau of Economic Research, Working Paper Series, 30302.
#> - Available at: http://www.nber.org/papers/w30302
Vignette under construction. It illustrates how to fit partially linear DML models to the Pennsylvania reemployment bonus data (`Penn`) using two different machine-learning methods: random forests (`ranger`) and neural networks (`nnet`).
rm(list = ls())
# Pennsylvania reemployment bonus experiment data
data("Penn")
y <- Penn[, "inuidur1"]  # outcome
d <- Penn[, "tg"]        # treatment
# covariate matrix without an intercept
x <- model.matrix(~ -1 + female + black + othrace + dep + q2 + q3 + q4 + q5 + q6 + agelt35 +
                    agegt54 + durable + lusd + husd, data = Penn)
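Before fitting, it helps to sanity-check the constructed inputs. A minimal sketch using base R only (nothing here is specific to dml.sensemakr):

# quick sanity checks on the inputs to dml()
length(y)    # number of observations for the outcome
table(d)     # distribution of the treatment variable
dim(x)       # observations by covariates
colnames(x)  # covariate names from the model.matrix() call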
# DML with random forests (ranger) for both the outcome and treatment regressions
dml.ranger <- dml(y, d, x, model = "plm")
#> Debiased Machine Learning
#>
#> Model: Partially Linear
#> Target: ate
#> Cross-Fitting: 5 folds, 1 reps
#> ML Method: outcome (ranger), treatment (ranger)
#> Tuning: dirty
#>
#>
#> ====================================
#> Tuning parameters using all the data
#> ====================================
#>
#> - Tuning Model for D.
#> -- Best Tune:
#> mtry min.node.size splitrule
#> 1 5 5 variance
#>
#> - Tuning Model for Y (partially linear).
#> -- Best Tune:
#> mtry min.node.size splitrule
#> 1 5 5 variance
#>
#>
#> ======================================
#> Repeating 5-fold cross-fitting 1 times
#> ======================================
#>
#> -- Rep 1 -- Folds: 1 2 3 4 5
summary(dml.ranger)
#>
#> Debiased Machine Learning
#>
#> Model: Partially Linear
#> Cross-Fitting: 5 folds, 1 reps
#> ML Method: outcome (ranger, R2 = 0.341%), treatment (ranger, R2 = 0%)
#> Tuning: dirty
#>
#> Average Treatment Effect:
#>
#> Estimate Std. Error t value P(>|t|)
#> ate.all -0.07405 0.03535 -2.095 0.0362 *
#> ---
#> Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#> Note: DML estimates combined using the median method.
#>
#> Verbal interpretation of DML procedure:
#>
#> -- Average treatment effects were estimated using DML with 5-fold cross-fitting. In order to reduce the variance that stems from sample splitting, we repeated the procedure 1 times. Estimates are combined using the median as the final estimate, incorporating variation across experiments into the standard error as described in Chernozhukov et al. (2018). The outcome regression uses Random Forest from the R package ranger; the treatment regression uses Random Forest from the R package ranger.
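As a point of reference, the DML estimate can be compared against a plain OLS benchmark. A minimal sketch using base R's lm(); this is not part of the dml.sensemakr workflow, just a sanity check:

# naive OLS benchmark: outcome on treatment plus covariates, all linear
ols.fit <- lm(y ~ d + x)
summary(ols.fit)$coefficients["d", ]  # estimate, std. error, t value, p-value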
# arguments for a single-hidden-layer neural network (nnet)
nnet.args <- list(method = "nnet",
                  tuneGrid = data.frame(size = 2, decay = 0.02),
                  maxit = 1000, maxNWts = 10000)
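The method/tuneGrid structure of these arguments mirrors caret::train(). Assuming reg accepts any caret-style specification (an assumption worth verifying in ?dml), other learners could be plugged in the same way, e.g. gradient boosting:

# hypothetical alternative learner: gradient boosting via caret's "gbm" method
# (assumes reg is forwarded to a caret-style train() call -- see ?dml)
gbm.args <- list(method = "gbm",
                 tuneGrid = data.frame(n.trees = 500, interaction.depth = 2,
                                       shrinkage = 0.05, n.minobsinnode = 10),
                 verbose = FALSE)
# dml.gbm <- dml(y, d, x, model = "plm", reg = gbm.args)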
# DML with neural networks (nnet) for both regressions; stored under a new
# name so the ranger fit above is not overwritten
dml.nnet <- dml(y, d, x, model = "plm", reg = nnet.args)
#> Debiased Machine Learning
#>
#> Model: Partially Linear
#> Target: ate
#> Cross-Fitting: 5 folds, 1 reps
#> ML Method: outcome (nnet), treatment (nnet)
#> Tuning: dirty
#>
#>
#> ====================================
#> Tuning parameters using all the data
#> ====================================
#>
#> - Tuning Model for D.
#> -- Best Tune:
#> size decay
#> 1 2 0.02
#>
#> - Tuning Model for Y (partially linear).
#> -- Best Tune:
#> size decay
#> 1 2 0.02
#>
#>
#> ======================================
#> Repeating 5-fold cross-fitting 1 times
#> ======================================
#>
#> -- Rep 1 -- Folds: 1 2 3 4 5
summary(dml.nnet)
#>
#> Debiased Machine Learning
#>
#> Model: Partially Linear
#> Cross-Fitting: 5 folds, 1 reps
#> ML Method: outcome (nnet, R2 = 1.809%), treatment (nnet, R2 = 0%)
#> Tuning: dirty
#>
#> Average Treatment Effect:
#>
#> Estimate Std. Error t value P(>|t|)
#> ate.all -0.07797 0.03527 -2.211 0.027 *
#> ---
#> Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#> Note: DML estimates combined using the median method.
#>
#> Verbal interpretation of DML procedure:
#>
#> -- Average treatment effects were estimated using DML with 5-fold cross-fitting. In order to reduce the variance that stems from sample splitting, we repeated the procedure 1 times. Estimates are combined using the median as the final estimate, incorporating variation across experiments into the standard error as described in Chernozhukov et al. (2018). The outcome regression uses Neural Network from the R package nnet; the treatment regression uses Neural Network from the R package nnet.
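Both methods deliver nearly identical answers. A small sketch collecting the point estimates and standard errors reported in the two summaries above (numbers transcribed from the printed output, not extracted programmatically):

# side-by-side view of the ATE estimates reported above
comparison <- data.frame(method   = c("ranger", "nnet"),
                         estimate = c(-0.07405, -0.07797),
                         std.err  = c(0.03535, 0.03527))
comparison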