diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md index 742f5d9462..d31b500260 100644 --- a/RELEASE-NOTES.md +++ b/RELEASE-NOTES.md @@ -8,6 +8,8 @@ - The GLM submodule has been removed; please use [Bambi](https://bambinos.github.io/bambi/) instead. - The `Distribution` keyword argument `testval` has been deprecated in favor of `initval`. Furthermore, `initval` no longer assigns a `tag.test_value` on tensors since the initial values are now kept track of by the model object ([see #4913](https://github.com/pymc-devs/pymc/pull/4913)). - `pm.sample` now returns results as `InferenceData` instead of `MultiTrace` by default (see [#4744](https://github.com/pymc-devs/pymc/pull/4744)). +- `pm.sample_prior_predictive`, `pm.sample_posterior_predictive` and `pm.sample_posterior_predictive_w` now return an `InferenceData` object + by default, instead of a dictionary (see [#5073](https://github.com/pymc-devs/pymc/pull/5073)). - `pm.sample_prior_predictive` no longer returns transformed variable values by default. Pass them by name in `var_names` if you want to obtain these draws (see [4769](https://github.com/pymc-devs/pymc/pull/4769)). - ⚠ `pm.Bound` interface no longer accepts a callable class as argument; instead it requires an instantiated distribution (created via the `.dist()` API) to be passed as an argument. In addition, Bound no longer returns a class instance but works as a normal PyMC distribution. Finally, it is no longer possible to do predictive random sampling from Bounded variables. Please consult the new documentation for details on how to use Bounded variables (see [4815](https://github.com/pymc-devs/pymc/pull/4815)). - `pm.DensityDist` no longer accepts the `logp` as its first positional argument. It is now an optional keyword argument. If you pass a callable as the first positional argument, a `TypeError` will be raised (see [5026](https://github.com/pymc-devs/pymc3/pull/5026)). diff --git a/pymc/sampling.py b/pymc/sampling.py index e3d5c1ac7f..5c22026aac 100644 --- a/pymc/sampling.py +++ b/pymc/sampling.py @@ -267,11 +267,11 @@ def sample( callback=None, jitter_max_retries=10, *, - return_inferencedata=None, + return_inferencedata=True, idata_kwargs: dict = None, mp_ctx=None, **kwargs, -): +) -> Union[InferenceData, MultiTrace]: r"""Draw samples from the posterior using the given step methods. Multiple step methods are supported via compound step methods. @@ -336,9 +336,9 @@ def sample( Maximum number of repeated attempts (per chain) at creating an initial matrix with uniform jitter that yields a finite probability. This applies to ``jitter+adapt_diag`` and ``jitter+adapt_full`` init methods. - return_inferencedata : bool, default=True + return_inferencedata : bool Whether to return the trace as an :class:`arviz:arviz.InferenceData` (True) object or a `MultiTrace` (False). - Defaults to `False`, but we'll switch to `True` in an upcoming release. + Defaults to `True`. idata_kwargs : dict, optional Keyword arguments for :func:`pymc.to_inference_data` mp_ctx : multiprocessing.context.BaseContext @@ -450,9 +450,6 @@ def sample( if not isinstance(random_seed, abc.Iterable): raise TypeError("Invalid value for `random_seed`. 
Must be tuple, list or int") - if return_inferencedata is None: - return_inferencedata = True - if not discard_tuned_samples and not return_inferencedata: warnings.warn( "Tuning samples will be included in the returned `MultiTrace` object, which can lead to" @@ -1535,7 +1532,9 @@ def sample_posterior_predictive( random_seed=None, progressbar: bool = True, mode: Optional[Union[str, Mode]] = None, -) -> Dict[str, np.ndarray]: + return_inferencedata=True, + idata_kwargs: dict = None, +) -> Union[InferenceData, Dict[str, np.ndarray]]: """Generate posterior predictive samples from a model given a trace. Parameters @@ -1570,12 +1569,17 @@ def sample_posterior_predictive( time until completion ("expected time of arrival"; ETA). mode: The mode used by ``aesara.function`` to compile the graph. + return_inferencedata : bool + Whether to return an :class:`arviz:arviz.InferenceData` (True) object or a dictionary (False). + Defaults to True. + idata_kwargs : dict, optional + Keyword arguments for :func:`pymc.to_inference_data` Returns ------- - samples : dict - Dictionary with the variable names as keys, and values numpy arrays containing - posterior predictive samples. + arviz.InferenceData or Dict + An ArviZ ``InferenceData`` object containing the posterior predictive samples (default), or + a dictionary with variable names as keys, and samples as numpy arrays. """ _trace: Union[MultiTrace, PointList] @@ -1724,7 +1728,12 @@ def sample_posterior_predictive( for k, ary in ppc_trace.items(): ppc_trace[k] = ary.reshape((nchain, len_trace, *ary.shape[1:])) - return ppc_trace + if not return_inferencedata: + return ppc_trace + ikwargs = dict(model=model) + if idata_kwargs: + ikwargs.update(idata_kwargs) + return pm.to_inference_data(posterior_predictive=ppc_trace, **ikwargs) def sample_posterior_predictive_w( @@ -1734,6 +1743,8 @@ def sample_posterior_predictive_w( weights: Optional[ArrayLike] = None, random_seed: Optional[int] = None, progressbar: bool = True, + return_inferencedata=True, + idata_kwargs: dict = None, ): """Generate weighted posterior predictive samples from a list of models and a list of traces according to a set of weights. @@ -1760,12 +1771,18 @@ def sample_posterior_predictive_w( Whether or not to display a progress bar in the command line. The bar shows the percentage of completion, the sampling speed in samples per second (SPS), and the estimated remaining time until completion ("expected time of arrival"; ETA). + return_inferencedata : bool + Whether to return an :class:`arviz:arviz.InferenceData` (True) object or a dictionary (False). + Defaults to True. + idata_kwargs : dict, optional + Keyword arguments for :func:`pymc.to_inference_data` Returns ------- - samples : dict - Dictionary with the variables as keys. The values corresponding to the - posterior predictive samples from the weighted models. + arviz.InferenceData or Dict + An ArviZ ``InferenceData`` object containing the posterior predictive samples from the + weighted models (default), or a dictionary with variable names as keys, and samples as + numpy arrays. 
""" if isinstance(traces[0], InferenceData): n_samples = [ @@ -1884,7 +1901,13 @@ def sample_posterior_predictive_w( except KeyboardInterrupt: pass else: - return {k: np.asarray(v) for k, v in ppc.items()} + ppc = {k: np.asarray(v) for k, v in ppc.items()} + if not return_inferencedata: + return ppc + ikwargs = dict(model=models) + if idata_kwargs: + ikwargs.update(idata_kwargs) + return pm.to_inference_data(posterior_predictive=ppc, **ikwargs) def sample_prior_predictive( @@ -1893,7 +1916,9 @@ def sample_prior_predictive( var_names: Optional[Iterable[str]] = None, random_seed=None, mode: Optional[Union[str, Mode]] = None, -) -> Dict[str, np.ndarray]: + return_inferencedata=True, + idata_kwargs: dict = None, +) -> Union[InferenceData, Dict[str, np.ndarray]]: """Generate samples from the prior predictive distribution. Parameters @@ -1909,12 +1934,17 @@ def sample_prior_predictive( Seed for the random number generator. mode: The mode used by ``aesara.function`` to compile the graph. + return_inferencedata : bool + Whether to return an :class:`arviz:arviz.InferenceData` (True) object or a dictionary (False). + Defaults to True. + idata_kwargs : dict, optional + Keyword arguments for :func:`pymc.to_inference_data` Returns ------- - dict - Dictionary with variable names as keys. The values are numpy arrays of prior - samples. + arviz.InferenceData or Dict + An ArviZ ``InferenceData`` object containing the prior and prior predictive samples (default), + or a dictionary with variable names as keys and samples as numpy arrays. """ model = modelcontext(model) @@ -1980,7 +2010,13 @@ def sample_prior_predictive( for var_name in vars_: if var_name in data: prior[var_name] = data[var_name] - return prior + + if not return_inferencedata: + return prior + ikwargs = dict(model=model) + if idata_kwargs: + ikwargs.update(idata_kwargs) + return pm.to_inference_data(prior=prior, **ikwargs) def _init_jitter(model, point, chains, jitter_max_retries): diff --git a/pymc/smc/smc.py b/pymc/smc/smc.py index 83b71b478d..bc43fa643a 100644 --- a/pymc/smc/smc.py +++ b/pymc/smc/smc.py @@ -179,6 +179,7 @@ def initialize_population(self) -> Dict[str, NDArray]: self.draws, var_names=[v.name for v in self.model.unobserved_value_vars], model=self.model, + return_inferencedata=False, ) def _initialize_kernel(self): diff --git a/pymc/tests/test_data_container.py b/pymc/tests/test_data_container.py index e6506a1c9d..cfbdaa32a9 100644 --- a/pymc/tests/test_data_container.py +++ b/pymc/tests/test_data_container.py @@ -55,17 +55,19 @@ def test_sample(self): prior_trace1 = pm.sample_prior_predictive(1000) pp_trace1 = pm.sample_posterior_predictive(idata, samples=1000) - assert prior_trace0["b"].shape == (1000,) - assert prior_trace0["obs"].shape == (1000, 100) - assert prior_trace1["obs"].shape == (1000, 200) + assert prior_trace0.prior["b"].shape == (1, 1000) + assert prior_trace0.prior_predictive["obs"].shape == (1, 1000, 100) + assert prior_trace1.prior_predictive["obs"].shape == (1, 1000, 200) - assert pp_trace0["obs"].shape == (1000, 100) - - np.testing.assert_allclose(x, pp_trace0["obs"].mean(axis=0), atol=1e-1) - - assert pp_trace1["obs"].shape == (1000, 200) + assert pp_trace0.posterior_predictive["obs"].shape == (1, 1000, 100) + np.testing.assert_allclose( + x, pp_trace0.posterior_predictive["obs"].mean(("chain", "draw")), atol=1e-1 + ) - np.testing.assert_allclose(x_pred, pp_trace1["obs"].mean(axis=0), atol=1e-1) + assert pp_trace1.posterior_predictive["obs"].shape == (1, 1000, 200) + np.testing.assert_allclose( + x_pred, 
pp_trace1.posterior_predictive["obs"].mean(("chain", "draw")), atol=1e-1 + ) def test_sample_posterior_predictive_after_set_data(self): with pm.Model() as model: @@ -86,8 +88,10 @@ def test_sample_posterior_predictive_after_set_data(self): pm.set_data(new_data={"x": x_test}) y_test = pm.sample_posterior_predictive(trace) - assert y_test["obs"].shape == (1000, 3) - np.testing.assert_allclose(x_test, y_test["obs"].mean(axis=0), atol=1e-1) + assert y_test.posterior_predictive["obs"].shape == (1, 1000, 3) + np.testing.assert_allclose( + x_test, y_test.posterior_predictive["obs"].mean(("chain", "draw")), atol=1e-1 + ) def test_sample_after_set_data(self): with pm.Model() as model: @@ -116,8 +120,10 @@ def test_sample_after_set_data(self): ) pp_trace = pm.sample_posterior_predictive(new_idata, 1000) - assert pp_trace["obs"].shape == (1000, 3) - np.testing.assert_allclose(new_y, pp_trace["obs"].mean(axis=0), atol=1e-1) + assert pp_trace.posterior_predictive["obs"].shape == (1, 1000, 3) + np.testing.assert_allclose( + new_y, pp_trace.posterior_predictive["obs"].mean(("chain", "draw")), atol=1e-1 + ) def test_shared_data_as_index(self): """ @@ -130,7 +136,7 @@ def test_shared_data_as_index(self): alpha = pm.Normal("alpha", 0, 1.5, size=3) pm.Normal("obs", alpha[index], np.sqrt(1e-2), observed=y) - prior_trace = pm.sample_prior_predictive(1000, var_names=["alpha"]) + prior_trace = pm.sample_prior_predictive(1000) idata = pm.sample( 1000, init=None, @@ -146,10 +152,10 @@ def test_shared_data_as_index(self): pm.set_data(new_data={"index": new_index, "y": new_y}) pp_trace = pm.sample_posterior_predictive(idata, 1000, var_names=["alpha", "obs"]) - assert prior_trace["alpha"].shape == (1000, 3) + assert prior_trace.prior["alpha"].shape == (1, 1000, 3) assert idata.posterior["alpha"].shape == (1, 1000, 3) - assert pp_trace["alpha"].shape == (1000, 3) - assert pp_trace["obs"].shape == (1000, 3) + assert pp_trace.posterior_predictive["alpha"].shape == (1, 1000, 3) + assert pp_trace.posterior_predictive["obs"].shape == (1, 1000, 3) def test_shared_data_as_rv_input(self): """ diff --git a/pymc/tests/test_distributions.py b/pymc/tests/test_distributions.py index 75dffe586d..40f6cdf22b 100644 --- a/pymc/tests/test_distributions.py +++ b/pymc/tests/test_distributions.py @@ -3249,7 +3249,7 @@ def test_distinct_rvs(): X_rv = pm.Normal("x") Y_rv = pm.Normal("y") - pp_samples = pm.sample_prior_predictive(samples=2) + pp_samples = pm.sample_prior_predictive(samples=2, return_inferencedata=False) assert X_rv.owner.inputs[0] != Y_rv.owner.inputs[0] @@ -3259,7 +3259,7 @@ def test_distinct_rvs(): X_rv = pm.Normal("x") Y_rv = pm.Normal("y") - pp_samples_2 = pm.sample_prior_predictive(samples=2) + pp_samples_2 = pm.sample_prior_predictive(samples=2, return_inferencedata=False) assert np.array_equal(pp_samples["y"], pp_samples_2["y"]) diff --git a/pymc/tests/test_distributions_random.py b/pymc/tests/test_distributions_random.py index b6990c6a38..94e26e0ca8 100644 --- a/pymc/tests/test_distributions_random.py +++ b/pymc/tests/test_distributions_random.py @@ -1583,7 +1583,7 @@ def ref_rand(mu, rowcov, colcov): rowcov=np.eye(3), colcov=np.eye(3), ) - check = pm.sample_prior_predictive(n_fails) + check = pm.sample_prior_predictive(n_fails, return_inferencedata=False) ref_smp = ref_rand(mu=np.random.random((3, 3)), rowcov=np.eye(3), colcov=np.eye(3)) @@ -1921,7 +1921,7 @@ def sample_prior(self, distribution, shape, nested_rvs_info, prior_samples): nested_rvs_info, ) with model: - return pm.sample_prior_predictive(prior_samples) 
+ return pm.sample_prior_predictive(prior_samples, return_inferencedata=False) @pytest.mark.parametrize( ["prior_samples", "shape", "mu", "alpha"], @@ -2379,7 +2379,7 @@ def test_car_rng_fn(sparse): with pm.Model(rng_seeder=1): car = pm.CAR("car", mu, W, alpha, tau, size=size) mn = pm.MvNormal("mn", mu, cov, size=size) - check = pm.sample_prior_predictive(n_fails) + check = pm.sample_prior_predictive(n_fails, return_inferencedata=False) p, f = delta, n_fails while p <= delta and f > 0: diff --git a/pymc/tests/test_idata_conversion.py b/pymc/tests/test_idata_conversion.py index 465169ae13..83106bb091 100644 --- a/pymc/tests/test_idata_conversion.py +++ b/pymc/tests/test_idata_conversion.py @@ -66,8 +66,10 @@ def data(self, eight_schools_params, draws, chains): def get_inference_data(self, data, eight_schools_params): with data.model: - prior = pm.sample_prior_predictive() - posterior_predictive = pm.sample_posterior_predictive(data.obj) + prior = pm.sample_prior_predictive(return_inferencedata=False) + posterior_predictive = pm.sample_posterior_predictive( + data.obj, return_inferencedata=False + ) return ( to_inference_data( @@ -85,8 +87,10 @@ def get_predictions_inference_data( self, data, eight_schools_params, inplace ) -> Tuple[InferenceData, Dict[str, np.ndarray]]: with data.model: - prior = pm.sample_prior_predictive() - posterior_predictive = pm.sample_posterior_predictive(data.obj) + prior = pm.sample_prior_predictive(return_inferencedata=False) + posterior_predictive = pm.sample_posterior_predictive( + data.obj, return_inferencedata=False + ) idata = to_inference_data( trace=data.obj, @@ -106,7 +110,9 @@ def make_predictions_inference_data( self, data, eight_schools_params ) -> Tuple[InferenceData, Dict[str, np.ndarray]]: with data.model: - posterior_predictive = pm.sample_posterior_predictive(data.obj) + posterior_predictive = pm.sample_posterior_predictive( + data.obj, return_inferencedata=False + ) idata = predictions_to_inference_data( posterior_predictive, posterior_trace=data.obj, @@ -199,7 +205,9 @@ def test_predictions_to_idata_new(self, data, eight_schools_params): def test_posterior_predictive_keep_size(self, data, chains, draws, eight_schools_params): with data.model: - posterior_predictive = pm.sample_posterior_predictive(data.obj, keep_size=True) + posterior_predictive = pm.sample_posterior_predictive( + data.obj, keep_size=True, return_inferencedata=False + ) inference_data = to_inference_data( trace=data.obj, posterior_predictive=posterior_predictive, @@ -214,7 +222,9 @@ def test_posterior_predictive_keep_size(self, data, chains, draws, eight_schools def test_posterior_predictive_warning(self, data, eight_schools_params, caplog): with data.model: - posterior_predictive = pm.sample_posterior_predictive(data.obj, 370) + posterior_predictive = pm.sample_posterior_predictive( + data.obj, 370, return_inferencedata=False + ) inference_data = to_inference_data( trace=data.obj, posterior_predictive=posterior_predictive, @@ -375,10 +385,7 @@ def test_multiple_observed_rv_without_observations(self): with pm.Model(): mu = pm.Normal("mu") x = pm.DensityDist( # pylint: disable=unused-variable - "x", - mu, - logp=lambda value, mu: pm.Normal.logp(value, mu, 1), - observed=0.1, + "x", mu, logp=lambda value, mu: pm.Normal.logp(value, mu, 1), observed=0.1 ) inference_data = pm.sample(100, chains=2, return_inferencedata=True) test_dict = { @@ -483,7 +490,9 @@ def test_predictions_constant_data(self): y = pm.Data("y", [1.0, 2.0]) beta = pm.Normal("beta", 0, 1) obs = pm.Normal("obs", x * 
beta, 1, observed=y) # pylint: disable=unused-variable - predictive_trace = pm.sample_posterior_predictive(inference_data) + predictive_trace = pm.sample_posterior_predictive( + inference_data, return_inferencedata=False + ) assert set(predictive_trace.keys()) == {"obs"} # this should be four chains of 100 samples # assert predictive_trace["obs"].shape == (400, 2) @@ -506,8 +515,8 @@ def test_no_trace(self): beta = pm.Normal("beta", 0, 1) obs = pm.Normal("obs", x * beta, 1, observed=y) # pylint: disable=unused-variable idata = pm.sample(100, tune=100) - prior = pm.sample_prior_predictive() - posterior_predictive = pm.sample_posterior_predictive(idata) + prior = pm.sample_prior_predictive(return_inferencedata=False) + posterior_predictive = pm.sample_posterior_predictive(idata, return_inferencedata=False) # Only prior inference_data = to_inference_data(prior=prior, model=model) @@ -539,7 +548,7 @@ def test_priors_separation(self, use_context): y = pm.Data("y", [1.0, 2.0, 3.0]) beta = pm.Normal("beta", 0, 1) obs = pm.Normal("obs", x * beta, 1, observed=y) # pylint: disable=unused-variable - prior = pm.sample_prior_predictive() + prior = pm.sample_prior_predictive(return_inferencedata=False) test_dict = { "prior": ["beta", "~obs"], @@ -574,10 +583,7 @@ def test_multivariate_observations(self): def test_constant_data_coords_issue_5046(self): """This is a regression test against a bug where a local coords variable was overwritten.""" - dims = { - "alpha": ["backwards"], - "bravo": ["letters", "yesno"], - } + dims = {"alpha": ["backwards"], "bravo": ["letters", "yesno"]} coords = { "backwards": np.arange(17)[::-1], "letters": list("ABCDEFGHIJK"), @@ -592,20 +598,13 @@ def test_constant_data_coords_issue_5046(self): assert len(data[k].shape) == len(dims[k]) ds = pm.backends.arviz.dict_to_dataset( - data=data, - library=pm, - coords=coords, - dims=dims, - default_dims=[], - index_origin=0, + data=data, library=pm, coords=coords, dims=dims, default_dims=[], index_origin=0 ) for dname, cvals in coords.items(): np.testing.assert_array_equal(ds[dname].values, cvals) def test_issue_5043_autoconvert_coord_values(self): - coords = { - "city": pd.Series(["Bonn", "Berlin"]), - } + coords = {"city": pd.Series(["Bonn", "Berlin"])} with pm.Model(coords=coords) as pmodel: # The model tracks coord values as (immutable) tuples assert isinstance(pmodel.coords["city"], tuple) @@ -631,11 +630,7 @@ def test_issue_5043_autoconvert_coord_values(self): trace=mtrace, coords={ "city": pd.MultiIndex.from_tuples( - [ - ("Bonn", 53111), - ("Berlin", 10178), - ], - names=["name", "zipcode"], + [("Bonn", 53111), ("Berlin", 10178)], names=["name", "zipcode"] ) }, ) diff --git a/pymc/tests/test_missing.py b/pymc/tests/test_missing.py index f252200990..a532bdf3b1 100644 --- a/pymc/tests/test_missing.py +++ b/pymc/tests/test_missing.py @@ -27,15 +27,14 @@ @pytest.mark.parametrize( - "data", - [ma.masked_values([1, 2, -1, 4, -1], value=-1), pd.DataFrame([1, 2, np.nan, 4, np.nan])], + "data", [ma.masked_values([1, 2, -1, 4, -1], value=-1), pd.DataFrame([1, 2, np.nan, 4, np.nan])] ) def test_missing(data): with Model() as model: x = Normal("x", 1, 1) with pytest.warns(ImputationWarning): - y = Normal("y", x, 1, observed=data) + _ = Normal("y", x, 1, observed=data) assert "y_missing" in model.named_vars @@ -43,7 +42,7 @@ def test_missing(data): assert not np.isnan(model.logp(test_point)) with model: - prior_trace = sample_prior_predictive() + prior_trace = sample_prior_predictive(return_inferencedata=False) assert {"x", "y"} <= 
set(prior_trace.keys()) @@ -61,7 +60,7 @@ def test_missing_with_predictors(): assert not np.isnan(model.logp(test_point)) with model: - prior_trace = sample_prior_predictive() + prior_trace = sample_prior_predictive(return_inferencedata=False) assert {"x", "y"} <= set(prior_trace.keys()) @@ -77,7 +76,7 @@ def test_missing_dual_observations(): with pytest.warns(ImputationWarning): ovar2 = Normal("o2", mu=beta2 * latent, observed=obs2) - prior_trace = sample_prior_predictive() + prior_trace = sample_prior_predictive(return_inferencedata=False) assert {"beta1", "beta2", "theta", "o1", "o2"} <= set(prior_trace.keys()) # TODO: Assert something trace = sample(chains=1, draws=50) @@ -101,7 +100,7 @@ def test_interval_missing_observations(): model.rvs_to_values[model.named_vars["theta1_observed"]].tag.transform, Interval ) - prior_trace = sample_prior_predictive() + prior_trace = sample_prior_predictive(return_inferencedata=False) # Make sure the observed + missing combined deterministics have the # same shape as the original observation vectors assert {"theta1", "theta2"} <= set(prior_trace.keys()) trace = sample( - chains=1, - draws=50, - compute_convergence_checks=False, - return_inferencedata=False, + chains=1, draws=50, compute_convergence_checks=False, return_inferencedata=False ) assert np.all(0 < trace["theta1_missing"].mean(0)) @@ -135,7 +131,7 @@ # Make sure that the observed values are newly generated samples and that # the observed and deterministic match - pp_trace = sample_posterior_predictive(trace) + pp_trace = sample_posterior_predictive(trace, return_inferencedata=False) assert np.all(np.var(pp_trace["theta1"], 0) > 0.0) assert np.all(np.var(pp_trace["theta2"], 0) > 0.0) assert np.mean(pp_trace["theta1"][:, ~obs1.mask] - pp_trace["theta1_observed"]) == 0.0 diff --git a/pymc/tests/test_sampling.py b/pymc/tests/test_sampling.py index 132781815d..5f45d4d69c 100644 --- a/pymc/tests/test_sampling.py +++ b/pymc/tests/test_sampling.py @@ -204,7 +204,7 @@ def test_return_inferencedata(self): assert len(result._groups_warmup) > 0 # inferencedata without tuning, with idata_kwargs - prior = pm.sample_prior_predictive() + prior = pm.sample_prior_predictive(return_inferencedata=False) result = pm.sample( **kwargs, return_inferencedata=True, @@ -477,19 +477,21 @@ def test_normal_scalar(self): with model: # test list input - ppc0 = pm.sample_posterior_predictive([model.initial_point], samples=10) + ppc0 = pm.sample_posterior_predictive( + [model.initial_point], samples=10, return_inferencedata=False + ) # # deprecated argument is not introduced to fast version [2019/08/20:rpg] - ppc = pm.sample_posterior_predictive(trace, var_names=["a"]) + ppc = pm.sample_posterior_predictive(trace, var_names=["a"], return_inferencedata=False) # test empty ppc - ppc = pm.sample_posterior_predictive(trace, var_names=[]) + ppc = pm.sample_posterior_predictive(trace, var_names=[], return_inferencedata=False) assert len(ppc) == 0 # test keep_size parameter - ppc = pm.sample_posterior_predictive(trace, keep_size=True) + ppc = pm.sample_posterior_predictive(trace, keep_size=True, return_inferencedata=False) assert ppc["a"].shape == (nchains, ndraws) # test default case - ppc = pm.sample_posterior_predictive(trace, var_names=["a"]) + ppc = pm.sample_posterior_predictive(trace, var_names=["a"], return_inferencedata=False) assert "a" in ppc assert ppc["a"].shape == (nchains * ndraws,) # mu's standard deviation may
have changed thanks to a's observed @@ -498,7 +500,9 @@ def test_normal_scalar(self): # size argument not introduced to fast version [2019/08/20:rpg] with model: - ppc = pm.sample_posterior_predictive(trace, size=5, var_names=["a"]) + ppc = pm.sample_posterior_predictive( + trace, size=5, var_names=["a"], return_inferencedata=False + ) assert ppc["a"].shape == (nchains * ndraws, 5) def test_normal_scalar_idata(self): @@ -521,7 +525,7 @@ def test_normal_scalar_idata(self): idata = pm.to_inference_data(trace) assert isinstance(idata, InferenceData) - ppc = pm.sample_posterior_predictive(idata, keep_size=True) + ppc = pm.sample_posterior_predictive(idata, keep_size=True, return_inferencedata=False) assert ppc["a"].shape == (nchains, ndraws) def test_normal_vector(self, caplog): @@ -532,25 +536,35 @@ def test_normal_vector(self, caplog): with model: # test list input - ppc0 = pm.sample_posterior_predictive([model.initial_point], samples=10) - ppc = pm.sample_posterior_predictive(trace, samples=12, var_names=[]) + ppc0 = pm.sample_posterior_predictive( + [model.initial_point], return_inferencedata=False, samples=10 + ) + ppc = pm.sample_posterior_predictive( + trace, return_inferencedata=False, samples=12, var_names=[] + ) assert len(ppc) == 0 # test keep_size parameter - ppc = pm.sample_posterior_predictive(trace, keep_size=True) + ppc = pm.sample_posterior_predictive(trace, return_inferencedata=False, keep_size=True) assert ppc["a"].shape == (trace.nchains, len(trace), 2) with pytest.warns(UserWarning): - ppc = pm.sample_posterior_predictive(trace, samples=12, var_names=["a"]) + ppc = pm.sample_posterior_predictive( + trace, return_inferencedata=False, samples=12, var_names=["a"] + ) assert "a" in ppc assert ppc["a"].shape == (12, 2) with pytest.warns(UserWarning): - ppc = pm.sample_posterior_predictive(trace, samples=12, var_names=["a"]) + ppc = pm.sample_posterior_predictive( + trace, return_inferencedata=False, samples=12, var_names=["a"] + ) assert "a" in ppc assert ppc["a"].shape == (12, 2) # size unsupported by fast_ version argument. 
[2019/08/19:rpg] - ppc = pm.sample_posterior_predictive(trace, samples=10, var_names=["a"], size=4) + ppc = pm.sample_posterior_predictive( + trace, return_inferencedata=False, samples=10, var_names=["a"], size=4 + ) assert "a" in ppc assert ppc["a"].shape == (10, 4, 2) @@ -567,7 +581,7 @@ def test_normal_vector_idata(self, caplog): idata = pm.to_inference_data(trace) assert isinstance(idata, InferenceData) - ppc = pm.sample_posterior_predictive(idata, keep_size=True) + ppc = pm.sample_posterior_predictive(idata, return_inferencedata=False, keep_size=True) assert ppc["a"].shape == (trace.nchains, len(trace), 2) def test_exceptions(self, caplog): @@ -600,11 +614,15 @@ def test_vector_observed(self): # TODO: Assert something about the output # ppc = pm.sample_posterior_predictive(idata, samples=12, var_names=[]) # assert len(ppc) == 0 - ppc = pm.sample_posterior_predictive(idata, samples=12, var_names=["a"]) + ppc = pm.sample_posterior_predictive( + idata, return_inferencedata=False, samples=12, var_names=["a"] + ) assert "a" in ppc assert ppc["a"].shape == (12, 2) - ppc = pm.sample_posterior_predictive(idata, samples=10, var_names=["a"], size=4) + ppc = pm.sample_posterior_predictive( + idata, return_inferencedata=False, samples=10, var_names=["a"], size=4 + ) assert "a" in ppc assert ppc["a"].shape == (10, 4, 2) @@ -616,9 +634,13 @@ def test_sum_normal(self): with model: # test list input - ppc0 = pm.sample_posterior_predictive([model.initial_point], samples=10) + ppc0 = pm.sample_posterior_predictive( + [model.initial_point], return_inferencedata=False, samples=10 + ) assert ppc0 == {} - ppc = pm.sample_posterior_predictive(idata, samples=1000, var_names=["b"]) + ppc = pm.sample_posterior_predictive( + idata, return_inferencedata=False, samples=1000, var_names=["b"] + ) assert len(ppc) == 1 assert ppc["b"].shape == (1000,) scale = np.sqrt(1 + 0.2 ** 2) @@ -637,7 +659,7 @@ def test_model_not_drawable_prior(self): with pytest.raises(NotImplementedError) as excinfo: pm.sample_prior_predictive(50) assert "Cannot sample" in str(excinfo.value) - samples = pm.sample_posterior_predictive(idata, 40) + samples = pm.sample_posterior_predictive(idata, 40, return_inferencedata=False) assert samples["foo"].shape == (40, 200) def test_model_shared_variable(self): @@ -660,7 +682,7 @@ def test_model_shared_variable(self): samples = 100 with model: post_pred = pm.sample_posterior_predictive( - trace, samples=samples, var_names=["p", "obs"] + trace, return_inferencedata=False, samples=samples, var_names=["p", "obs"] ) expected_p = np.array([logistic.eval({coeff: val}) for val in trace["x"][:samples]]) @@ -694,6 +716,7 @@ def test_deterministic_of_observed(self): rtol = 1e-5 if aesara.config.floatX == "float64" else 1e-4 ppc = pm.sample_posterior_predictive( + return_inferencedata=False, model=model, trace=trace, samples=len(trace) * nchains, @@ -728,6 +751,7 @@ def test_deterministic_of_observed_modified_interface(self): trace, varnames=[n for n in trace.varnames if n != "out"] ).to_dict("records") ppc = pm.sample_posterior_predictive( + return_inferencedata=False, model=model, trace=ppc_trace, samples=len(ppc_trace), @@ -745,7 +769,7 @@ def test_variable_type(self): trace = pm.sample(compute_convergence_checks=False, return_inferencedata=False) with model: - ppc = pm.sample_posterior_predictive(trace, samples=1) + ppc = pm.sample_posterior_predictive(trace, return_inferencedata=False, samples=1) assert ppc["a"].dtype.kind == "f" assert ppc["b"].dtype.kind == "i" @@ -918,7 +942,7 @@ def 
test_ignores_observed(self): positive_mu = pm.Deterministic("positive_mu", np.abs(mu)) z = -1 - positive_mu pm.Normal("x_obs", mu=z, sigma=1, observed=observed_data) - prior = pm.sample_prior_predictive() + prior = pm.sample_prior_predictive(return_inferencedata=False) assert "observed_data" not in prior assert (prior["mu"] < -90).all() @@ -932,8 +956,12 @@ def test_respects_shape(self): with pm.Model(): mu = pm.Gamma("mu", 3, 1, size=1) goals = pm.Poisson("goals", mu, size=shape) - trace1 = pm.sample_prior_predictive(10, var_names=["mu", "mu", "goals"]) - trace2 = pm.sample_prior_predictive(10, var_names=["mu", "goals"]) + trace1 = pm.sample_prior_predictive( + 10, return_inferencedata=False, var_names=["mu", "mu", "goals"] + ) + trace2 = pm.sample_prior_predictive( + 10, return_inferencedata=False, var_names=["mu", "goals"] + ) if shape == 2: # want to test shape as an int shape = (2,) assert trace1["goals"].shape == (10,) + shape @@ -944,7 +972,7 @@ def test_multivariate(self): m = pm.Multinomial("m", n=5, p=np.array([0.25, 0.25, 0.25, 0.25])) trace = pm.sample_prior_predictive(10) - assert trace["m"].shape == (10, 4) + assert trace.prior["m"].shape == (1, 10, 4) def test_multivariate2(self): # Added test for issue #3271 @@ -955,8 +983,12 @@ def test_multivariate2(self): burned_trace = pm.sample( 20, tune=10, cores=1, return_inferencedata=False, compute_convergence_checks=False ) - sim_priors = pm.sample_prior_predictive(samples=20, model=dm_model) - sim_ppc = pm.sample_posterior_predictive(burned_trace, samples=20, model=dm_model) + sim_priors = pm.sample_prior_predictive( + return_inferencedata=False, samples=20, model=dm_model + ) + sim_ppc = pm.sample_posterior_predictive( + burned_trace, return_inferencedata=False, samples=20, model=dm_model + ) assert sim_priors["probs"].shape == (20, 6) assert sim_priors["obs"].shape == (20,) + mn_data.shape assert sim_ppc["obs"].shape == (20,) + mn_data.shape @@ -987,9 +1019,9 @@ def test_transformed(self): y = pm.Binomial("y", n=at_bats, p=thetas, observed=hits) gen = pm.sample_prior_predictive(draws) - assert gen["phi"].shape == (draws,) - assert gen["y"].shape == (draws, n) - assert "thetas" in gen + assert gen.prior["phi"].shape == (1, draws) + assert gen.prior_predictive["y"].shape == (1, draws, n) + assert "thetas" in gen.prior.data_vars def test_shared(self): n1 = 10 @@ -1002,16 +1034,16 @@ def test_shared(self): o = pm.Deterministic("o", obs) gen1 = pm.sample_prior_predictive(draws) - assert gen1["y"].shape == (draws, n1) - assert gen1["o"].shape == (draws, n1) + assert gen1.prior["y"].shape == (1, draws, n1) + assert gen1.prior["o"].shape == (1, draws, n1) n2 = 20 obs.set_value(np.random.rand(n2) < 0.5) with m: gen2 = pm.sample_prior_predictive(draws) - assert gen2["y"].shape == (draws, n2) - assert gen2["o"].shape == (draws, n2) + assert gen2.prior["y"].shape == (1, draws, n2) + assert gen2.prior["o"].shape == (1, draws, n2) def test_density_dist(self): obs = np.random.normal(-1, 0.1, size=10) @@ -1025,7 +1057,7 @@ def test_density_dist(self): random=lambda mu, sd, rng=None, size=None: rng.normal(loc=mu, scale=sd, size=size), observed=obs, ) - prior = pm.sample_prior_predictive() + prior = pm.sample_prior_predictive(return_inferencedata=False) npt.assert_almost_equal(prior["a"].mean(), 0, decimal=1) @@ -1035,7 +1067,7 @@ def test_shape_edgecase(self): sd = pm.Uniform("sd", lower=2, upper=3) x = pm.Normal("x", mu=mu, sigma=sd, size=5) prior = pm.sample_prior_predictive(10) - assert prior["mu"].shape == (10, 5) + assert 
prior.prior["mu"].shape == (1, 10, 5) def test_zeroinflatedpoisson(self): with pm.Model(): @@ -1043,9 +1075,9 @@ def test_zeroinflatedpoisson(self): psi = pm.HalfNormal("psi", sd=1) pm.ZeroInflatedPoisson("suppliers", psi=psi, theta=theta, size=20) gen_data = pm.sample_prior_predictive(samples=5000) - assert gen_data["theta"].shape == (5000,) - assert gen_data["psi"].shape == (5000,) - assert gen_data["suppliers"].shape == (5000, 20) + assert gen_data.prior["theta"].shape == (1, 5000) + assert gen_data.prior["psi"].shape == (1, 5000) + assert gen_data.prior["suppliers"].shape == (1, 5000, 20) def test_potentials_warning(self): warning_msg = "The effect of Potentials on other parameters is ignored during" @@ -1075,10 +1107,10 @@ def ub_interval_forward(x, ub): ) # Check values are correct - assert np.allclose(prior["ub_log__"], np.log(prior["ub"])) + assert np.allclose(prior.prior["ub_log__"].data, np.log(prior.prior["ub"].data)) assert np.allclose( - prior["x_interval__"], - ub_interval_forward(prior["x"], prior["ub"]), + prior.prior["x_interval__"].data, + ub_interval_forward(prior.prior["x"].data, prior.prior["ub"].data), ) # Check that it works when the original RVs are not mentioned in var_names @@ -1090,9 +1122,16 @@ def ub_interval_forward(x, ub): var_names=["ub_log__", "x_interval__"], samples=10, ) - assert "ub" not in prior_transformed_only and "x" not in prior_transformed_only - assert np.allclose(prior["ub_log__"], prior_transformed_only["ub_log__"]) - assert np.allclose(prior["x_interval__"], prior_transformed_only["x_interval__"]) + assert ( + "ub" not in prior_transformed_only.prior.data_vars + and "x" not in prior_transformed_only.prior.data_vars + ) + assert np.allclose( + prior.prior["ub_log__"].data, prior_transformed_only.prior["ub_log__"].data + ) + assert np.allclose( + prior.prior["x_interval__"], prior_transformed_only.prior["x_interval__"].data + ) def test_issue_4490(self): # Test that samples do not depend on var_name order or, more fundamentally, @@ -1112,27 +1151,34 @@ def test_issue_4490(self): d = pm.Normal("d") prior2 = pm.sample_prior_predictive(samples=1, var_names=["b", "a", "d", "c"]) - assert prior1["a"] == prior2["a"] - assert prior1["b"] == prior2["b"] - assert prior1["c"] == prior2["c"] - assert prior1["d"] == prior2["d"] + assert prior1.prior["a"] == prior2.prior["a"] + assert prior1.prior["b"] == prior2.prior["b"] + assert prior1.prior["c"] == prior2.prior["c"] + assert prior1.prior["d"] == prior2.prior["d"] class TestSamplePosteriorPredictive: def test_point_list_arg_bug_spp(self, point_list_arg_bug_fixture): pmodel, trace = point_list_arg_bug_fixture with pmodel: - pp = pm.sample_posterior_predictive([trace[15]], var_names=["d"]) + pp = pm.sample_posterior_predictive( + [trace[15]], return_inferencedata=False, var_names=["d"] + ) def test_sample_from_xarray_prior(self, point_list_arg_bug_fixture): pmodel, trace = point_list_arg_bug_fixture with pmodel: - prior = pm.sample_prior_predictive(samples=20) + prior = pm.sample_prior_predictive( + samples=20, + return_inferencedata=False, + ) idat = pm.to_inference_data(trace, prior=prior) with pmodel: - pp = pm.sample_posterior_predictive(idat.prior, var_names=["d"]) + pp = pm.sample_posterior_predictive( + idat.prior, return_inferencedata=False, var_names=["d"] + ) def test_sample_from_xarray_posterior(self, point_list_arg_bug_fixture): pmodel, trace = point_list_arg_bug_fixture diff --git a/pymc/tests/test_shared.py b/pymc/tests/test_shared.py index ad09d19ad1..233ba01508 100644 --- 
a/pymc/tests/test_shared.py +++ b/pymc/tests/test_shared.py @@ -48,10 +48,14 @@ def test_sample(self): prior_trace1 = pm.sample_prior_predictive(1000) pp_trace1 = pm.sample_posterior_predictive(idata, 1000) - assert prior_trace0["b"].shape == (1000,) - assert prior_trace0["obs"].shape == (1000, 100) - np.testing.assert_allclose(x, pp_trace0["obs"].mean(axis=0), atol=1e-1) - - assert prior_trace1["b"].shape == (1000,) - assert prior_trace1["obs"].shape == (1000, 200) - np.testing.assert_allclose(x_pred, pp_trace1["obs"].mean(axis=0), atol=1e-1) + assert prior_trace0.prior["b"].shape == (1, 1000) + assert prior_trace0.prior_predictive["obs"].shape == (1, 1000, 100) + np.testing.assert_allclose( + x, pp_trace0.posterior_predictive["obs"].mean(("chain", "draw")), atol=1e-1 + ) + + assert prior_trace1.prior["b"].shape == (1, 1000) + assert prior_trace1.prior_predictive["obs"].shape == (1, 1000, 200) + np.testing.assert_allclose( + x_pred, pp_trace1.posterior_predictive["obs"].mean(("chain", "draw")), atol=1e-1 + ) diff --git a/pymc/tests/test_smc.py b/pymc/tests/test_smc.py index 8227ad569d..c86b03292e 100644 --- a/pymc/tests/test_smc.py +++ b/pymc/tests/test_smc.py @@ -293,8 +293,8 @@ def test_one_gaussian(self): with self.SMABC_test: trace = pm.sample_smc(draws=1000, return_inferencedata=False) - pr_p = pm.sample_prior_predictive(1000) - po_p = pm.sample_posterior_predictive(trace, 1000) + pr_p = pm.sample_prior_predictive(1000, return_inferencedata=False) + po_p = pm.sample_posterior_predictive(trace, 1000, return_inferencedata=False) assert abs(self.data.mean() - trace["a"].mean()) < 0.05 assert abs(self.data.std() - trace["b"].mean()) < 0.05
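
Usage sketch, not part of the patch: a minimal example of the new defaults introduced by this diff, assuming a PyMC v4 build that includes #5073. The model and the names `mu`, `obs`, and `obs_id` are illustrative, not taken from the PR.

    import numpy as np
    import pymc as pm

    with pm.Model() as model:
        mu = pm.Normal("mu", 0.0, 1.0)
        pm.Normal("obs", mu, 1.0, observed=np.random.randn(100))

        # Prior predictive draws now come back as InferenceData by default,
        # with a leading "chain" dimension on every variable.
        prior = pm.sample_prior_predictive(1000)
        assert prior.prior["mu"].shape == (1, 1000)
        assert prior.prior_predictive["obs"].shape == (1, 1000, 100)

        idata = pm.sample()  # InferenceData by default as well

        # Posterior predictive sampling likewise returns InferenceData, so
        # reductions are spelled with named dims instead of axis=0.
        ppc = pm.sample_posterior_predictive(idata)
        post_mean = ppc.posterior_predictive["obs"].mean(("chain", "draw"))

        # The old dictionary-of-arrays return value is still available by
        # opting out, as the updated tests in this diff do.
        ppc_dict = pm.sample_posterior_predictive(idata, return_inferencedata=False)
        assert isinstance(ppc_dict["obs"], np.ndarray)

        # idata_kwargs is forwarded to pm.to_inference_data, e.g. to attach
        # dims/coords; "obs_id" is a hypothetical dimension name.
        prior2 = pm.sample_prior_predictive(
            1000, idata_kwargs={"dims": {"obs": ["obs_id"]}}
        )

Downstream code that indexed the returned dictionaries directly should either read from the named groups (`prior`, `prior_predictive`, `posterior_predictive`) or pass `return_inferencedata=False`.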