@techreport{NBERt0312,
  title       = {Inference with {``Difference in Differences''} with a Small Number of Policy Changes},
  author      = {Conley, Timothy and Taber, Christopher},
  institution = {National Bureau of Economic Research},
  type        = {Working Paper},
  series      = {Technical Working Paper Series},
  number      = {312},
  year        = {2005},
  month       = jul,
  url         = {http://www.nber.org/papers/t0312},
  abstract    = {Difference in differences methods have become very popular in applied work. This paper provides a new method for inference in these models when there are a small number of policy changes. This situation occurs in many implementations of these estimators. Identification of the key parameter typically arises when a group ``changes'' some particular policy. The asymptotic approximations that are typically employed assume that the number of cross sectional groups, N, times the number of time periods, T, is large. However, even when N or T is large, the number of actual policy changes observed in the data is often very small. In this case, we argue that point estimators of treatment effects should not be thought of as being consistent and that the standard methods that researchers use to perform inference in these models are not appropriate. We develop an alternative approach to inference under the assumption that there are a finite number of policy changes in the data, using asymptotic approximations as the number of non-changing groups gets large. In this situation we cannot obtain a consistent point estimator for the key treatment effect parameter. However, we can consistently estimate the finite-sample distribution of the treatment effect estimator, up to the unknown parameter itself. This allows us to perform hypothesis tests and construct confidence intervals. For expositional and motivational purposes, we focus on the difference in differences case, but our approach should be appropriate more generally in treatment effect models which employ a large number of controls, but a small number of treatments. We demonstrate the use of the approach by analyzing the effect of college merit aide programs on college attendance. We show that in some cases the standard approach can give misleading results.},
}