From 66d8c2d8d7e010e4d37c57b250aac77bca54a961 Mon Sep 17 00:00:00 2001
From: Keith Battocchi
Date: Fri, 17 Apr 2020 10:59:13 -0400
Subject: [PATCH] Add attributes for nuisance scores

---
 econml/_ortho_learner.py | 36 +++++--
 econml/_rlearner.py | 21 ++++
 econml/dml.py | 14 +++
 econml/drlearner.py | 25 +++++
 econml/ortho_iv.py | 149 +++++++++++++++++++++++++++++
 econml/tests/test_dml.py | 6 ++
 econml/tests/test_ortho_iv.py | 2 +-
 econml/tests/test_ortho_learner.py | 95 +++++++++++++++---
 8 files changed, 326 insertions(+), 22 deletions(-)

diff --git a/econml/_ortho_learner.py b/econml/_ortho_learner.py
index 32aae153f..0ae736b0e 100644
--- a/econml/_ortho_learner.py
+++ b/econml/_ortho_learner.py
@@ -76,7 +76,7 @@ def _crossfit(model, folds, *args, **kwargs):
     -------
     nuisances : tuple of numpy matrices
         Each entry in the tuple is a nuisance parameter matrix. Each row i-th in the
-        matric corresponds to the value of the nuisancee parameter for the i-th input
+        matrix corresponds to the value of the nuisance parameter for the i-th input
         sample.
     model_list : list of objects of same type as input model
         The cloned and fitted models for each fold. Can be used for inspection of the
@@ -85,6 +85,8 @@ def _crossfit(model, folds, *args, **kwargs):
         The indices of the arrays for which the nuisance value was calculated. This
         corresponds to the union of the indices of the test part of each fold in
         the input fold list.
+    scores : tuple of list of float or None
+        The out-of-sample model scores for each nuisance model
 
     Examples
     --------
@@ -108,7 +110,7 @@ def predict(self, X, y, W=None):
     y = X[:, 0] + np.random.normal(size=(5000,))
     folds = list(KFold(2).split(X, y))
     model = Lasso(alpha=0.01)
-    nuisance, model_list, fitted_inds = _crossfit(Wrapper(model), folds, X, y, W=y, Z=None)
+    nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model), folds, X, y, W=y, Z=None)
     >>> nuisance
     (array([-1.105728... , -1.537566..., -2.451827...
     , ..., 1.106287...,
@@ -121,18 +123,22 @@ def predict(self, X, y, W=None):
     """
     model_list = []
     fitted_inds = []
+    calculate_scores = hasattr(model, 'score')
 
     if folds is None:  # skip crossfitting
         model_list.append(clone(model, safe=False))
         kwargs = {k: v for k, v in kwargs.items() if v is not None}
         model_list[0].fit(*args, **kwargs)
         nuisances = model_list[0].predict(*args, **kwargs)
+        scores = model_list[0].score(*args, **kwargs) if calculate_scores else None
 
         if not isinstance(nuisances, tuple):
             nuisances = (nuisances,)
+        if not isinstance(scores, tuple):
+            scores = (scores,)
 
         first_arr = args[0] if args else kwargs.items()[0][1]
-        return nuisances, model_list, np.arange(first_arr.shape[0])
+        return nuisances, model_list, np.arange(first_arr.shape[0]), scores
 
     for idx, (train_idxs, test_idxs) in enumerate(folds):
         model_list.append(clone(model, safe=False))
@@ -162,7 +168,19 @@ def predict(self, X, y, W=None):
         for it, nuis in enumerate(nuisance_temp):
             nuisances[it][test_idxs] = nuis
 
-    return nuisances, model_list, np.sort(fitted_inds.astype(int))
+        if calculate_scores:
+            score_temp = model_list[idx].score(*args_test, **kwargs_test)
+
+            if not isinstance(score_temp, tuple):
+                score_temp = (score_temp,)
+
+            if idx == 0:
+                scores = tuple([] for _ in score_temp)
+
+            for it, score in enumerate(score_temp):
+                scores[it].append(score)
+
+    return nuisances, model_list, np.sort(fitted_inds.astype(int)), (scores if calculate_scores else None)
 
 
 class _OrthoLearner(TreatmentExpansionMixin, LinearCateEstimator):
@@ -235,6 +253,9 @@ class _OrthoLearner(TreatmentExpansionMixin, LinearCateEstimator):
         methods. If ``discrete_treatment=True``, then the input ``T`` to both above calls will be the one-hot
         encoding of the original input ``T``, excluding the first column of the one-hot.
 
+        If the estimator also provides a score method with the same arguments as fit, it will be used to
+        calculate scores during training.
+
     model_final: estimator for fitting the response residuals to the features and treatment residuals
         Must implement `fit` and `predict` methods that must have signatures::
 
@@ -409,6 +430,8 @@ def score(self, Y, T, W=None, nuisances=None):
         If the model_final has a score method, then `score_` contains the outcome of the final model
         score when evaluated on the fitted nuisances from the first stage. Represents goodness of fit,
         of the final CATE model.
+    nuisance_scores_ : tuple of lists of floats or None
+        The out-of-sample scores from training each nuisance model
     """
 
     def __init__(self, model_nuisance, model_final, *,
@@ -561,9 +584,10 @@ def _fit_nuisances(self, Y, T, X=None, W=None, Z=None, sample_weight=None):
                                      reshape(self._label_encoder.transform(T.ravel()), (-1, 1)))[:, 1:]),
                 validate=False)
 
-        nuisances, fitted_models, fitted_inds = _crossfit(self._model_nuisance, folds,
-                                                          Y, T, X=X, W=W, Z=Z, sample_weight=sample_weight)
+        nuisances, fitted_models, fitted_inds, scores = _crossfit(self._model_nuisance, folds,
+                                                                  Y, T, X=X, W=W, Z=Z, sample_weight=sample_weight)
         self._models_nuisance = fitted_models
+        self.nuisance_scores_ = scores
         return nuisances, fitted_inds
 
     def _fit_final(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None, sample_var=None):
diff --git a/econml/_rlearner.py b/econml/_rlearner.py
index d7747cf7e..46db3c35f 100644
--- a/econml/_rlearner.py
+++ b/econml/_rlearner.py
@@ -185,6 +185,8 @@ def predict(self, X):
         An instance of the model_final object that was fitted after calling fit.
     score_ : float
         The MSE in the final residual on residual regression, i.e.
+    nuisance_scores_ : tuple of list of float
+        The out-of-sample scores for each nuisance model
 
         .. math::
             \\frac{1}{n} \\sum_{i=1}^n (Y_i - \\hat{E}[Y|X_i, W_i]\
@@ -213,6 +215,17 @@ def fit(self, Y, T, X=None, W=None, Z=None, sample_weight=None):
             self._model_y.fit(X, W, Y, sample_weight=sample_weight)
             return self
 
+        def score(self, Y, T, X=None, W=None, Z=None, sample_weight=None):
+            if hasattr(self._model_y, 'score'):
+                Y_score = self._model_y.score(X, W, Y, sample_weight=sample_weight)
+            else:
+                Y_score = None
+            if hasattr(self._model_t, 'score'):
+                T_score = self._model_t.score(X, W, T, sample_weight=sample_weight)
+            else:
+                T_score = None
+            return Y_score, T_score
+
         def predict(self, Y, T, X=None, W=None, Z=None, sample_weight=None):
             Y_pred = self._model_y.predict(X, W)
             T_pred = self._model_t.predict(X, W)
@@ -334,3 +347,11 @@ def models_y(self):
     @property
     def models_t(self):
         return [mdl._model_t for mdl in super().models_nuisance]
+
+    @property
+    def nuisance_scores_y(self):
+        return self.nuisance_scores_[0]
+
+    @property
+    def nuisance_scores_t(self):
+        return self.nuisance_scores_[1]
diff --git a/econml/dml.py b/econml/dml.py
index 05672355c..5d7284152 100644
--- a/econml/dml.py
+++ b/econml/dml.py
@@ -100,6 +100,20 @@ def predict(self, X, W):
         else:
             return self._model.predict(self._combine(X, W, n_samples, fitting=False))
 
+    def score(self, X, W, Target, sample_weight=None):
+        if hasattr(self._model, 'score'):
+            if (not self._is_Y) and self._discrete_treatment:
+                # In this case, the Target is the one-hot-encoding of the treatment variable
+                # We need to go back to the label representation of the one-hot so as to call
+                # the classifier.
+                Target = inverse_onehot(Target)
+            if sample_weight is not None:
+                return self._model.score(self._combine(X, W, Target.shape[0]), Target, sample_weight=sample_weight)
+            else:
+                return self._model.score(self._combine(X, W, Target.shape[0]), Target)
+        else:
+            return None
+
 
 class _FinalWrapper:
     def __init__(self, model_final, fit_cate_intercept, featurizer, use_weight_trick):
diff --git a/econml/drlearner.py b/econml/drlearner.py
index 538e11ec6..bad16ed1c 100644
--- a/econml/drlearner.py
+++ b/econml/drlearner.py
@@ -275,6 +275,21 @@ def fit(self, Y, T, X=None, W=None, *, sample_weight=None):
             self._model_regression.fit(np.hstack([XW, T]), Y, **filtered_kwargs)
             return self
 
+        def score(self, Y, T, X=None, W=None, *, sample_weight=None):
+            XW = self._combine(X, W)
+            filtered_kwargs = _filter_none_kwargs(sample_weight=sample_weight)
+
+            if hasattr(self._model_propensity, 'score'):
+                propensity_score = self._model_propensity.score(XW, inverse_onehot(T), **filtered_kwargs)
+            else:
+                propensity_score = None
+            if hasattr(self._model_regression, 'score'):
+                regression_score = self._model_regression.score(np.hstack([XW, T]), Y, **filtered_kwargs)
+            else:
+                regression_score = None
+
+            return propensity_score, regression_score
+
         def predict(self, Y, T, X=None, W=None, *, sample_weight=None):
             XW = self._combine(X, W)
             propensities = np.maximum(self._model_propensity.predict_proba(XW), min_propensity)
@@ -472,6 +487,16 @@ def models_regression(self):
         """
         return [mdl._model_regression for mdl in super().models_nuisance]
 
+    @property
+    def nuisance_scores_propensity(self):
+        """Gets the score for the propensity model on out-of-sample training data"""
+        return self.nuisance_scores_[0]
+
+    @property
+    def nuisance_scores_regression(self):
+        """Gets the score for the regression model on out-of-sample training data"""
+        return self.nuisance_scores_[1]
+
     @property
     def featurizer(self):
""" diff --git a/econml/ortho_iv.py b/econml/ortho_iv.py index 69f06d93c..6e9c93ac3 100644 --- a/econml/ortho_iv.py +++ b/econml/ortho_iv.py @@ -61,6 +61,29 @@ def fit(self, X, *args, sample_weight=None): else: self._model.fit(self._combine(X, Z, Target.shape[0]), Target) + def score(self, X, *args, sample_weight=None): + if hasattr(self._model, 'score'): + if len(args) == 1: + Target, = args + Z = None + else: + (Z, Target) = args + if self._discrete_target: + # In this case, the Target is the one-hot-encoding of the treatment variable + # We need to go back to the label representation of the one-hot so as to call + # the classifier. + if np.any(np.all(Target == 0, axis=0)) or (not np.any(np.all(Target == 0, axis=1))): + raise AttributeError("Provided crossfit folds contain training splits that " + + "don't contain all treatments") + Target = inverse_onehot(Target) + + if sample_weight is not None: + return self._model.score(self._combine(X, Z, Target.shape[0]), Target, sample_weight=sample_weight) + else: + return self._model.score(self._combine(X, Z, Target.shape[0]), Target) + else: + return None + def predict(self, X, Z=None): n_samples = X.shape[0] if X is not None else (Z.shape[0] if Z is not None else 1) if self._discrete_target: @@ -203,6 +226,21 @@ def fit(self, Y, T, X=None, W=None, Z=None, sample_weight=None): self._model_Z_X.fit(W, Z, sample_weight=sample_weight) return self + def score(self, Y, T, X=None, W=None, Z=None, sample_weight=None): + if hasattr(self._model_Y_X, 'score'): + Y_X_score = self._model_Y_X.score(W, Y, sample_weight=sample_weight) + else: + Y_X_score = None + if hasattr(self._model_T_X, 'score'): + T_X_score = self._model_T_X.score(W, T, sample_weight=sample_weight) + else: + T_X_score = None + if hasattr(self._model_Z_X, 'score'): + Z_X_score = self._model_Z_X.score(W, Z, sample_weight=sample_weight) + else: + Z_X_score = None + return Y_X_score, T_X_score, Z_X_score + def predict(self, Y, T, X=None, W=None, Z=None, sample_weight=None): Y_pred = self._model_Y_X.predict(W) T_pred = self._model_T_X.predict(W) @@ -239,6 +277,21 @@ def fit(self, Y, T, X=None, W=None, Z=None, sample_weight=None): self._model_T_XZ.fit(W, Z, T, sample_weight=sample_weight) return self + def score(self, Y, T, X=None, W=None, Z=None, sample_weight=None): + if hasattr(self._model_Y_X, 'score'): + Y_X_score = self._model_Y_X.score(W, Y, sample_weight=sample_weight) + else: + Y_X_score = None + if hasattr(self._model_T_X, 'score'): + T_X_score = self._model_T_X.score(W, T, sample_weight=sample_weight) + else: + T_X_score = None + if hasattr(self._model_T_XZ, 'score'): + T_XZ_score = self._model_T_XZ.score(W, Z, T, sample_weight=sample_weight) + else: + T_XZ_score = None + return Y_X_score, T_X_score, T_XZ_score + def predict(self, Y, T, X=None, W=None, Z=None, sample_weight=None): Y_pred = self._model_Y_X.predict(W) TX_pred = self._model_T_X.predict(W) @@ -344,6 +397,21 @@ def fit(self, Y, T, X=None, W=None, Z=None, sample_weight=None): self._model_T_XZ.fit(X, Z, T, sample_weight=sample_weight) return self + def score(self, Y, T, X=None, W=None, Z=None, sample_weight=None): + if hasattr(self._model_Y_X, 'score'): + Y_X_score = self._model_Y_X.score(X, Y, sample_weight=sample_weight) + else: + Y_X_score = None + if hasattr(self._model_T_X, 'score'): + T_X_score = self._model_T_X.score(X, T, sample_weight=sample_weight) + else: + T_X_score = None + if hasattr(self._model_T_XZ, 'score'): + T_XZ_score = self._model_T_XZ.score(X, Z, T, sample_weight=sample_weight) + else: + T_XZ_score = 
None + return Y_X_score, T_X_score, T_XZ_score + def predict(self, Y, T, X=None, W=None, Z=None, sample_weight=None): Y_pred = self._model_Y_X.predict(X) TXZ_pred = self._model_T_XZ.predict(X, Z) @@ -521,6 +589,27 @@ def models_T_XZ(self): """ return [mdl._model for mdl in super().models_T_XZ] + @property + def nuisance_scores_Y_X(self): + """ + Get the scores for Y_X model on the out-of-sample training data + """ + return self.nuisance_scores_[0] + + @property + def nuisance_scores_T_X(self): + """ + Get the scores for T_X model on the out-of-sample training data + """ + return self.nuisance_scores_[1] + + @property + def nuisance_scores_T_XZ(self): + """ + Get the scores for T_XZ model on the out-of-sample training data + """ + return self.nuisance_scores_[2] + def cate_feature_names(self, input_feature_names=None): """ Get the output feature names. @@ -913,6 +1002,36 @@ def fit(self, Y, T, Z, X=None, *, sample_weight=None, sample_var=None, inference return super().fit(Y, T, X=X, W=None, Z=Z, sample_weight=sample_weight, sample_var=sample_var, inference=inference) + def score(self, Y, T, Z, X=None): + """ + Score the fitted CATE model on a new data set. Generates nuisance parameters + for the new data set based on the fitted nuisance models created at fit time. + It uses the mean prediction of the models fitted by the different crossfit folds. + Then calls the score function of the model_final and returns the calculated score. + The model_final model must have a score method. + + If model_final does not have a score method, then it raises an :exc:`.AttributeError` + + Parameters + ---------- + Y: (n, d_y) matrix or vector of length n + Outcomes for each sample + T: (n, d_t) matrix or vector of length n + Treatments for each sample + Z: (n, d_z) matrix or None (Default=None) + Instruments for each sample + X: optional (n, d_x) matrix or None (Default=None) + Features for each sample + + Returns + ------- + score : float or (array of float) + The score of the final CATE model on the new data. Same type as the return + type of the model_final.score method. 
+ """ + # Replacing score from _OrthoLearner, to enforce W=None and improve the docstring + return super().score(Y, T, X=X, W=None, Z=Z) + @property def original_featurizer(self): return super().model_final._original_featurizer @@ -989,6 +1108,24 @@ def fit(self, Y, T, X=None, W=None, Z=None, sample_weight=None): self._prel_model_effect.fit(Y, inverse_onehot(T), inverse_onehot(Z), X=X) return self + def score(self, Y, T, X=None, W=None, Z=None, sample_weight=None): + if hasattr(self._model_Y_X, 'score'): + Y_X_score = self._model_Y_X.score(X, Y, sample_weight=sample_weight) + else: + Y_X_score = None + if hasattr(self._model_T_XZ, 'score'): + T_XZ_score = self._model_T_XZ.score(X, Z, T, sample_weight=sample_weight) + else: + T_XZ_score = None + if hasattr(self._prel_model_effect, 'score'): + # we need to undo the one-hot encoding for calling effect, + # since it expects raw values + effect_score = self._prel_model_effect.score(Y, inverse_onehot(T), inverse_onehot(Z), X=X) + else: + effect_score = None + + return Y_X_score, T_XZ_score, effect_score + def predict(self, Y, T, X=None, W=None, Z=None, sample_weight=None): Y_pred = self._model_Y_X.predict(X) T_pred_zero = self._model_T_XZ.predict(X, np.zeros(Z.shape)) @@ -1117,6 +1254,18 @@ def models_Y_X(self): def models_T_XZ(self): return [mdl._model_T_XZ._model for mdl in super().models_nuisance] + @property + def nuisance_scores_Y_X(self): + return self.nuisance_scores_[0] + + @property + def nuisance_scores_T_XZ(self): + return self.nuisance_scores_[1] + + @property + def nuisance_scores_effect(self): + return self.nuisance_scores_[2] + class LinearIntentToTreatDRIV(StatsModelsCateEstimatorMixin, IntentToTreatDRIV): """ diff --git a/econml/tests/test_dml.py b/econml/tests/test_dml.py index 4bc9ad53b..39aa8819e 100644 --- a/econml/tests/test_dml.py +++ b/econml/tests/test_dml.py @@ -148,6 +148,12 @@ def make_random(is_discrete, d): np.testing.assert_array_equal( marg_eff if d_x else marg_eff[0:1], const_marg_eff) + assert isinstance(est.score_, float) + for score in est.nuisance_scores_y: + assert isinstance(score, float) + for score in est.nuisance_scores_t: + assert isinstance(score, float) + T0 = np.full_like(T, 'a') if is_discrete else np.zeros_like(T) eff = est.effect(X, T0=T0, T1=T) self.assertEqual(shape(eff), effect_shape) diff --git a/econml/tests/test_ortho_iv.py b/econml/tests/test_ortho_iv.py index cfe57930a..6aa11e7c3 100644 --- a/econml/tests/test_ortho_iv.py +++ b/econml/tests/test_ortho_iv.py @@ -20,7 +20,7 @@ import econml.tests.utilities # bugfix for assertWarns -class TestDML(unittest.TestCase): +class TestOrthoIV(unittest.TestCase): def test_cate_api(self): """Test that we correctly implement the CATE API.""" diff --git a/econml/tests/test_ortho_learner.py b/econml/tests/test_ortho_learner.py index 0c0786c1b..0be7f6e06 100644 --- a/econml/tests/test_ortho_learner.py +++ b/econml/tests/test_ortho_learner.py @@ -28,18 +28,25 @@ def fit(self, X, y, Q, W=None): def predict(self, X, y, Q, W=None): return self._model.predict(X), y - self._model.predict(X), X + def score(self, X, y, Q, W=None): + return self._model.score(X, y) + np.random.seed(123) X = np.random.normal(size=(5000, 3)) y = X[:, 0] + np.random.normal(size=(5000,)) folds = list(KFold(2).split(X, y)) model = Lasso(alpha=0.01) - nuisance, model_list, fitted_inds = _crossfit(Wrapper(model), - folds, - X, y, y, W=y, Z=None) + nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model), + folds, + X, y, y, W=y, Z=None) 
         np.testing.assert_allclose(nuisance[0][folds[0][1]],
                                    model.fit(X[folds[0][0]], y[folds[0][0]]).predict(X[folds[0][1]]))
         np.testing.assert_allclose(nuisance[0][folds[0][0]],
                                    model.fit(X[folds[0][1]], y[folds[0][1]]).predict(X[folds[0][0]]))
+        np.testing.assert_allclose(scores[0][0], model.fit(X[folds[0][0]], y[folds[0][0]]).score(X[folds[0][1]],
+                                                                                                 y[folds[0][1]]))
+        np.testing.assert_allclose(scores[0][1], model.fit(X[folds[0][1]], y[folds[0][1]]).score(X[folds[0][0]],
+                                                                                                 y[folds[0][0]]))
         coef_ = np.zeros(X.shape[1])
         coef_[0] = 1
         [np.testing.assert_allclose(coef_, mdl._model.coef_, rtol=0, atol=0.08) for mdl in model_list]
@@ -50,13 +57,17 @@ def predict(self, X, y, Q, W=None):
         y = X[:, 0] + np.random.normal(size=(5000,))
         folds = list(KFold(2).split(X, y))
         model = Lasso(alpha=0.01)
-        nuisance, model_list, fitted_inds = _crossfit(Wrapper(model),
-                                                      folds,
-                                                      X, y, None, W=y, Z=None)
+        nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model),
+                                                              folds,
+                                                              X, y, None, W=y, Z=None)
         np.testing.assert_allclose(nuisance[0][folds[0][1]],
                                    model.fit(X[folds[0][0]], y[folds[0][0]]).predict(X[folds[0][1]]))
         np.testing.assert_allclose(nuisance[0][folds[0][0]],
                                    model.fit(X[folds[0][1]], y[folds[0][1]]).predict(X[folds[0][0]]))
+        np.testing.assert_allclose(scores[0][0], model.fit(X[folds[0][0]], y[folds[0][0]]).score(X[folds[0][1]],
+                                                                                                 y[folds[0][1]]))
+        np.testing.assert_allclose(scores[0][1], model.fit(X[folds[0][1]], y[folds[0][1]]).score(X[folds[0][0]],
+                                                                                                 y[folds[0][0]]))
         coef_ = np.zeros(X.shape[1])
         coef_[0] = 1
         [np.testing.assert_allclose(coef_, mdl._model.coef_, rtol=0, atol=0.08) for mdl in model_list]
@@ -67,13 +78,17 @@ def predict(self, X, y, Q, W=None):
         y = X[:, 0] + np.random.normal(size=(5000,))
         folds = list(KFold(2).split(X, y))
         model = Lasso(alpha=0.01)
-        nuisance, model_list, fitted_inds = _crossfit(Wrapper(model),
-                                                      folds,
-                                                      X, y, None, W=None, Z=None)
+        nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model),
+                                                              folds,
+                                                              X, y, None, W=None, Z=None)
         np.testing.assert_allclose(nuisance[0][folds[0][1]],
                                    model.fit(X[folds[0][0]], y[folds[0][0]]).predict(X[folds[0][1]]))
         np.testing.assert_allclose(nuisance[0][folds[0][0]],
                                    model.fit(X[folds[0][1]], y[folds[0][1]]).predict(X[folds[0][0]]))
+        np.testing.assert_allclose(scores[0][0], model.fit(X[folds[0][0]], y[folds[0][0]]).score(X[folds[0][1]],
+                                                                                                 y[folds[0][1]]))
+        np.testing.assert_allclose(scores[0][1], model.fit(X[folds[0][1]], y[folds[0][1]]).score(X[folds[0][0]],
+                                                                                                 y[folds[0][0]]))
         coef_ = np.zeros(X.shape[1])
         coef_[0] = 1
         [np.testing.assert_allclose(coef_, mdl._model.coef_, rtol=0, atol=0.08) for mdl in model_list]
@@ -98,9 +113,9 @@ def predict(self, X, y, W=None):
                  (np.arange(X.shape[0] // 2), np.arange(X.shape[0] // 2, X.shape[0]))]
         model = Lasso(alpha=0.01)
         with pytest.raises(AttributeError) as e_info:
-            nuisance, model_list, fitted_inds = _crossfit(Wrapper(model),
-                                                          folds,
-                                                          X, y, W=y, Z=None)
+            nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model),
+                                                                  folds,
+                                                                  X, y, W=y, Z=None)
 
         np.random.seed(123)
         X = np.random.normal(size=(5000, 3))
         y = X[:, 0] + np.random.normal(size=(5000,))
         folds = [(np.arange(X.shape[0]), np.arange(X.shape[0]))]
         model = Lasso(alpha=0.01)
         with pytest.raises(AttributeError) as e_info:
-            nuisance, model_list, fitted_inds = _crossfit(Wrapper(model),
-                                                          folds,
-                                                          X, y, W=y, Z=None)
+            nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model),
+                                                                  folds,
+                                                                  X, y, W=y, Z=None)
 
     def test_ol(self):
@@ -157,6 +172,8 @@ def score(self, Y, T, W=None, nuisances=None):
         np.testing.assert_almost_equal(est.score(y, X[:, 0], W=X[:, 1:]), sigma**2, decimal=3)
         np.testing.assert_almost_equal(est.score_, sigma**2, decimal=3)
         np.testing.assert_almost_equal(est.model_final.model.coef_[0], 1, decimal=3)
+        # Nuisance model has no score method, so nuisance_scores_ should be None
+        assert est.nuisance_scores_ is None
 
         # Test non keyword based calls to fit
         np.random.seed(123)
@@ -247,6 +264,54 @@ def predict(self, X=None):
         assert est.score_ is None
         np.testing.assert_almost_equal(est.model_final.model.coef_[0], 1, decimal=3)
+    def test_ol_nuisance_scores(self):
+        class ModelNuisance:
+            def __init__(self, model_t, model_y):
+                self._model_t = model_t
+                self._model_y = model_y
+
+            def fit(self, Y, T, W=None):
+                self._model_t.fit(W, T)
+                self._model_y.fit(W, Y)
+                return self
+
+            def predict(self, Y, T, W=None):
+                return Y - self._model_y.predict(W), T - self._model_t.predict(W)
+
+            def score(self, Y, T, W=None):
+                return (self._model_y.score(W, Y), self._model_t.score(W, T))
+
+        class ModelFinal:
+
+            def __init__(self):
+                return
+
+            def fit(self, Y, T, W=None, nuisances=None):
+                Y_res, T_res = nuisances
+                self.model = LinearRegression(fit_intercept=False).fit(T_res.reshape(-1, 1), Y_res)
+                return self
+
+            def predict(self, X=None):
+                return self.model.coef_[0]
+
+        np.random.seed(123)
+        X = np.random.normal(size=(10000, 3))
+        sigma = 0.1
+        y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,))
+        est = _OrthoLearner(ModelNuisance(LinearRegression(), LinearRegression()), ModelFinal(),
+                            n_splits=2, discrete_treatment=False, discrete_instrument=False, random_state=None)
+        est.fit(y, X[:, 0], W=X[:, 1:])
+        np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=3)
+        np.testing.assert_array_almost_equal(est.effect(), np.ones(1), decimal=3)
+        np.testing.assert_array_almost_equal(est.effect(T0=0, T1=10), np.ones(1) * 10, decimal=2)
+        np.testing.assert_almost_equal(est.model_final.model.coef_[0], 1, decimal=3)
+        nuisance_scores_y = est.nuisance_scores_[0]
+        nuisance_scores_t = est.nuisance_scores_[1]
+        assert len(nuisance_scores_y) == len(nuisance_scores_t) == 2  # as many scores as splits
+        # y scores should be positive, since W predicts Y somewhat
+        # t scores might not be, since W and T are uncorrelated
+        np.testing.assert_array_less(0, nuisance_scores_y)
+
     def test_ol_discrete_treatment(self):
         class ModelNuisance:
             def __init__(self, model_t, model_y):
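
Usage sketch (hypothetical, not part of the diff above): the snippet below shows how the new nuisance score attributes could be read off a fitted DML estimator built on the _RLearner changes in this patch. The class name LinearDMLCateEstimator and its constructor arguments are assumed from the library's API of this period rather than taken from the patch, so treat them as illustrative.

    import numpy as np
    from sklearn.linear_model import LassoCV
    from econml.dml import LinearDMLCateEstimator  # assumed public DML estimator of this era

    np.random.seed(123)
    X = np.random.normal(size=(1000, 4))
    T = X[:, 0] + np.random.normal(size=1000)           # treatment driven by the first feature
    y = 2 * T + X[:, 1] + np.random.normal(size=1000)   # outcome with a constant treatment effect of 2

    est = LinearDMLCateEstimator(model_y=LassoCV(), model_t=LassoCV(), n_splits=2)
    est.fit(y, T, X=X[:, :1], W=X[:, 1:])

    # One out-of-sample score per crossfitting fold for each first-stage model;
    # entries are None when the corresponding nuisance model exposes no `score` method.
    print(est.nuisance_scores_y)
    print(est.nuisance_scores_t)

The DRLearner and IV estimators expose analogous accessors (nuisance_scores_propensity, nuisance_scores_regression, nuisance_scores_Y_X, nuisance_scores_T_X, nuisance_scores_T_XZ, nuisance_scores_effect), all of which index into the same underlying nuisance_scores_ tuple populated by _crossfit.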