Remove several deprecated model properties and deprecate new ones #7033

Merged · 3 commits · Nov 24, 2023
55 changes: 11 additions & 44 deletions pymc/model/core.py
@@ -496,7 +496,13 @@
             instance._parent = kwargs.get("model")
         else:
             instance._parent = cls.get_context(error_if_none=False)
-        instance._pytensor_config = kwargs.get("pytensor_config", {})
+        pytensor_config = kwargs.get("pytensor_config", {})
+        if pytensor_config:
+            warnings.warn(
+                "pytensor_config is deprecated. Use pytensor.config or pytensor.config.change_flags context manager instead.",
+                FutureWarning,
+            )
+        instance._pytensor_config = pytensor_config
         return instance
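For anyone migrating off `pytensor_config`, a minimal sketch of the replacement the warning points to (the flag value is taken from the tests below; the model contents are illustrative):

```python
import pytensor
import pymc as pm

# Deprecated: pm.Model(pytensor_config=dict(mode="JAX"))
# Replacement: scope the flag change explicitly around model usage.
with pytensor.config.change_flags(mode="JAX"):
    with pm.Model() as model:
        x = pm.Normal("x")
    # pytensor.config.mode is "JAX" only inside this block
```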

     @staticmethod
@@ -559,6 +565,7 @@

     @property
     def model(self):
+        warnings.warn("Model.model property is deprecated. Just use Model.", FutureWarning)
         return self

Member: 🤣

     @property
@@ -629,7 +636,7 @@
             Whether to sum all logp terms or return elemwise logp for each variable.
             Defaults to True.
         """
-        return self.model.compile_fn(self.logp(vars=vars, jacobian=jacobian, sum=sum))
+        return self.compile_fn(self.logp(vars=vars, jacobian=jacobian, sum=sum))
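For context, a small usage sketch of the compiled function returned here (the model contents are illustrative): `compile_logp` returns a callable that maps a point dict to the (summed) log-probability.

```python
import pymc as pm

with pm.Model() as m:
    x = pm.Normal("x")

logp_fn = m.compile_logp()    # sums all logp terms by default
point = m.initial_point()     # e.g. {"x": array(0.0)}
print(logp_fn(point))         # standard-normal log-density at 0
```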

     def compile_dlogp(
         self,
@@ -646,7 +653,7 @@
         jacobian:
             Whether to include jacobian terms in logprob graph. Defaults to True.
         """
-        return self.model.compile_fn(self.dlogp(vars=vars, jacobian=jacobian))
+        return self.compile_fn(self.dlogp(vars=vars, jacobian=jacobian))
Member: Shouldn't it be the root model instead?

ricardoV94 (Member, Author), Nov 24, 2023: This is the previous behavior. I don't know what should happen with the compiled functions of nested models.

I think nested models are a mistake. The functionality we want to provide with them (plating) is much narrower than what nested models do. We should probably use something like OpFromGraph for nested models, but that's another story.
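For readers following this thread, a sketch of what a nested model looks like; the `::` name prefixing reflects recent PyMC versions, and the registration details are stated as assumptions, not guarantees:

```python
import pymc as pm

with pm.Model() as parent:
    x = pm.Normal("x")
    with pm.Model(name="sub") as child:
        # Assumption: variables declared here are registered on the
        # submodel and propagated to the parent under a prefixed name
        # such as "sub::y".
        y = pm.Normal("y")

# After this change each model compiles against itself; since
# `Model.model` just returns `self`, the previous behavior is kept.
f_parent = parent.compile_logp()
f_child = child.compile_logp()
```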


     def compile_d2logp(
         self,
@@ -663,7 +670,7 @@
         jacobian:
             Whether to include jacobian terms in logprob graph. Defaults to True.
         """
-        return self.model.compile_fn(self.d2logp(vars=vars, jacobian=jacobian))
+        return self.compile_fn(self.d2logp(vars=vars, jacobian=jacobian))

     def logp(
         self,
@@ -886,27 +893,11 @@

         return vars + untransformed_vars + deterministics

-    @property
-    def disc_vars(self):
-        warnings.warn(
-            "Model.disc_vars has been deprecated. Use Model.discrete_value_vars instead.",
-            FutureWarning,
-        )
-        return self.discrete_value_vars
-
     @property
     def discrete_value_vars(self):
         """All the discrete value variables in the model"""
         return list(typefilter(self.value_vars, discrete_types))

-    @property
-    def cont_vars(self):
-        warnings.warn(
-            "Model.cont_vars has been deprecated. Use Model.continuous_value_vars instead.",
-            FutureWarning,
-        )
-        return self.continuous_value_vars
-
     @property
     def continuous_value_vars(self):
         """All the continuous value variables in the model"""
@@ -935,18 +926,6 @@
         """
         return self.free_RVs + self.deterministics

-    @property
-    def RV_dims(self) -> Dict[str, Tuple[Union[str, None], ...]]:
-        """Tuples of dimension names for specific model variables.
-
-        Entries in the tuples may be ``None``, if the RV dimension was not given a name.
-        """
-        warnings.warn(
-            "Model.RV_dims is deprecated. Use Model.named_vars_to_dims instead.",
-            FutureWarning,
-        )
-        return self.named_vars_to_dims
-
     @property
     def coords(self) -> Dict[str, Union[Tuple, None]]:
         """Coordinate values for model dimensions."""
@@ -1090,18 +1069,6 @@
         fn = make_initial_point_fn(model=self, return_transformed=True)
         return Point(fn(random_seed), model=self)

-    @property
-    def initial_values(self) -> Dict[TensorVariable, Optional[Union[np.ndarray, Variable, str]]]:
-        """Maps transformed variables to initial value placeholders.
-
-        Keys are the random variables (as returned by e.g. ``pm.Uniform()``) and
-        values are the numeric/symbolic initial values, strings denoting the strategy to get them, or None.
-        """
-        warnings.warn(
-            "Model.initial_values is deprecated. Use Model.rvs_to_initial_values instead."
-        )
-        return self.rvs_to_initial_values
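Summarizing the renames removed in this file, the migration for downstream code is (old attribute shown in the comment):

```python
import pymc as pm

m = pm.Model()
m.discrete_value_vars     # was: m.disc_vars
m.continuous_value_vars   # was: m.cont_vars
m.named_vars_to_dims      # was: m.RV_dims
m.rvs_to_initial_values   # was: m.initial_values
```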

     def set_initval(self, rv_var, initval):
         """Sets an initial value (strategy) for a random variable."""
         if initval is not None and not isinstance(initval, (Variable, str)):
31 changes: 14 additions & 17 deletions tests/model/test_core.py
@@ -705,17 +705,17 @@ def test_set_initval():
         alpha = pm.HalfNormal("alpha", initval=100)
         value = pm.NegativeBinomial("value", mu=mu, alpha=alpha)

-    assert np.array_equal(model.initial_values[mu], np.array([[100.0]]))
-    np.testing.assert_array_equal(model.initial_values[alpha], np.array(100))
-    assert model.initial_values[value] is None
+    assert np.array_equal(model.rvs_to_initial_values[mu], np.array([[100.0]]))
+    np.testing.assert_array_equal(model.rvs_to_initial_values[alpha], np.array(100))
+    assert model.rvs_to_initial_values[value] is None

     # `Flat` cannot be sampled, so let's make sure that doesn't break initial
     # value computations
     with pm.Model() as model:
         x = pm.Flat("x")
         y = pm.Normal("y", x, 1)

-    assert y in model.initial_values
+    assert y in model.rvs_to_initial_values


def test_datalogp_multiple_shapes():
@@ -974,18 +974,6 @@ def test_set_data_constant_shape_error():
         pmodel.set_data("y", np.arange(10))


-def test_model_deprecation_warning():
-    with pm.Model() as m:
-        x = pm.Normal("x", 0, 1, size=2)
-        y = pm.LogNormal("y", 0, 1, size=2)
-
-    with pytest.warns(FutureWarning):
-        m.disc_vars
-
-    with pytest.warns(FutureWarning):
-        m.cont_vars


 @pytest.mark.parametrize("jacobian", [True, False])
 def test_model_logp(jacobian):
     with pm.Model() as m:
@@ -1114,11 +1102,20 @@ def test_compile_fn():

 def test_model_pytensor_config():
     assert pytensor.config.mode != "JAX"
-    with pm.Model(pytensor_config=dict(mode="JAX")) as model:
+    with pytest.warns(FutureWarning, match="pytensor_config is deprecated"):
+        m = pm.Model(pytensor_config=dict(mode="JAX"))
+    with m:
         assert pytensor.config.mode == "JAX"
     assert pytensor.config.mode != "JAX"


+def test_deprecated_model_property():
+    m = pm.Model()
+    with pytest.warns(FutureWarning, match="Model.model property is deprecated"):
+        m_property = m.model
+    assert m is m_property


 def test_model_parent_set_programmatically():
     with pm.Model() as model:
         x = pm.Normal("x")
8 changes: 0 additions & 8 deletions tests/sampling/test_mcmc.py
@@ -797,14 +797,6 @@ def test_step_vars_in_model(self):
 class TestType:
     samplers = (Metropolis, Slice, HamiltonianMC, NUTS)

-    def setup_method(self):
-        # save PyTensor config object
-        self.pytensor_config = copy(pytensor.config)
-
-    def teardown_method(self):
-        # restore pytensor config
-        pytensor.config = self.pytensor_config
-
     @pytensor.config.change_flags({"floatX": "float64", "warn_float64": "ignore"})
     def test_float64(self):
         with pm.Model() as model:
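The removed setup/teardown pair hand-rolled what `pytensor.config.change_flags` already provides: a scoped override that restores the previous values on exit, usable as a decorator (as in `test_float64` above) or as a context manager. A minimal sketch:

```python
import pytensor

previous = pytensor.config.floatX
with pytensor.config.change_flags(floatX="float64", warn_float64="ignore"):
    assert pytensor.config.floatX == "float64"
assert pytensor.config.floatX == previous  # restored automatically on exit
```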
4 changes: 0 additions & 4 deletions tests/variational/test_approximations.py
@@ -84,10 +84,6 @@ def test_scale_cost_to_minibatch_works(aux_total_size):
     y_obs = np.array([1.6, 1.4])
     beta = len(y_obs) / float(aux_total_size)

-    # TODO: pytensor_config
-    # with pm.Model(pytensor_config=dict(floatX='float64')):
-    # did not not work as expected
-    # there were some numeric problems, so float64 is forced
     with pytensor.config.change_flags(floatX="float64", warn_float64="ignore"):
         assert pytensor.config.floatX == "float64"
         assert pytensor.config.warn_float64 == "ignore"