
Commit 24301c5
Change tfp/experimental to use internal imports to speed up tests.
  - Updates a few tests to avoid importing `distributions` or `bijectors` wholesale.

PiperOrigin-RevId: 472567989
srvasude authored and tensorflower-gardener committed Sep 6, 2022
1 parent c03f005 commit 24301c5
Showing 114 changed files with 4,733 additions and 3,633 deletions.
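
As a minimal sketch of the pattern this commit moves toward (illustrative only, not part of the diff below): instead of importing `distributions` or `math` wholesale, a test imports just the submodules it exercises, so it only pays the import cost of what it actually uses. The `normal` and `gradient` modules here stand in for whichever submodules a given test needs.

import tensorflow.compat.v2 as tf

# Before: wholesale imports load every distribution and all of tfp.math at
# test startup.
#   from tensorflow_probability.python import distributions as tfd
#   from tensorflow_probability.python import math as tfm

# After: targeted imports of only the submodules the test actually uses.
from tensorflow_probability.python.distributions import normal
from tensorflow_probability.python.math import gradient

loc = tf.constant(0.5)
# value_and_gradient differentiates the Normal log-prob with respect to `loc`.
log_prob, dlogprob_dloc = gradient.value_and_gradient(
    lambda l: normal.Normal(loc=l, scale=1.).log_prob(2.0), loc)
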
19 changes: 14 additions & 5 deletions tensorflow_probability/python/distributions/BUILD
@@ -766,7 +766,7 @@ multi_substrate_py_library(
"//tensorflow_probability/python/internal:distribution_util",
"//tensorflow_probability/python/internal:dtype_util",
"//tensorflow_probability/python/internal:tensorshape_util",
"//tensorflow_probability/python/math/psd_kernels",
"//tensorflow_probability/python/math/psd_kernels:schur_complement",
"//tensorflow_probability/python/util",
],
)
@@ -1025,6 +1025,8 @@ multi_substrate_py_library(
srcs = ["inverse_gaussian.py"],
deps = [
":distribution",
":inflated",
":negative_binomial",
":normal",
# numpy dep,
# tensorflow dep,
@@ -1145,6 +1147,8 @@ multi_substrate_py_library(
name = "joint_distribution_util",
srcs = ["joint_distribution_util.py"],
deps = [
":independent",
":joint_distribution_auto_batched",
":joint_distribution_named",
":joint_distribution_sequential",
# tensorflow dep,
@@ -1420,7 +1424,6 @@ multi_substrate_py_library(
"//tensorflow_probability/python/internal:parameter_properties",
"//tensorflow_probability/python/internal:reparameterization",
"//tensorflow_probability/python/internal:tensor_util",
"//tensorflow_probability/python/math",
"//tensorflow_probability/python/util:seed_stream",
],
)
@@ -2156,6 +2159,8 @@ multi_substrate_py_library(
"//tensorflow_probability/python/internal:reparameterization",
"//tensorflow_probability/python/internal:tensor_util",
"//tensorflow_probability/python/internal:tensorshape_util",
"//tensorflow_probability/python/math/psd_kernels:positive_semidefinite_kernel",
"//tensorflow_probability/python/math/psd_kernels:schur_complement",
],
)

@@ -2470,7 +2475,7 @@ multi_substrate_py_test(
"//tensorflow_probability/python/bijectors:scale_matvec_tril",
"//tensorflow_probability/python/internal:reparameterization",
"//tensorflow_probability/python/internal:test_util",
"//tensorflow_probability/python/math",
"//tensorflow_probability/python/math:linalg",
],
)

@@ -3198,8 +3203,12 @@ multi_substrate_py_test(
name = "inflated_test",
srcs = ["inflated_test.py"],
deps = [
":inflated",
":negative_binomial",
":normal",
# numpy dep,
"//tensorflow_probability",
# tensorflow dep,
"//tensorflow_probability/python/experimental/util",
"//tensorflow_probability/python/internal:test_util",
],
)
@@ -3230,7 +3239,7 @@ multi_substrate_py_test(
# scipy dep,
# tensorflow dep,
"//tensorflow_probability/python/internal:test_util",
"//tensorflow_probability/python/math",
"//tensorflow_probability/python/math:gradient",
],
)

@@ -20,7 +20,6 @@

import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import math
from tensorflow_probability.python.bijectors import masked_autoregressive
from tensorflow_probability.python.bijectors import scale_matvec_tril
from tensorflow_probability.python.distributions import autoregressive
@@ -32,6 +31,7 @@
from tensorflow_probability.python.distributions import transformed_distribution
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.math import linalg


@test_util.test_all_tf_execution_regimes
@@ -46,7 +46,7 @@ def setUp(self):
def _random_scale_tril(self, event_size):
n = np.int32(event_size * (event_size + 1) // 2)
p = 2. * self._rng.random_sample(n).astype(np.float32) - 1.
return math.fill_triangular(0.25 * p)
return linalg.fill_triangular(0.25 * p)

def _normal_fn(self, affine_bijector):
def _fn(samples):
@@ -25,7 +25,7 @@
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.math import psd_kernels as tfpk
from tensorflow_probability.python.math.psd_kernels import schur_complement
from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import


@@ -534,7 +534,7 @@ def __init__(self,

with tf.name_scope('init'):
if _conditional_kernel is None:
_conditional_kernel = tfpk.SchurComplement(
_conditional_kernel = schur_complement.SchurComplement(
base_kernel=kernel,
fixed_inputs=observation_index_points,
cholesky_fn=cholesky_fn,
@@ -749,7 +749,7 @@ def precompute_regression_model(
if cholesky_fn is None:
cholesky_fn = cholesky_util.make_cholesky_with_jitter_fn(jitter)

conditional_kernel = tfpk.SchurComplement.with_precomputed_divisor(
conditional_kernel = schur_complement.SchurComplement.with_precomputed_divisor(
base_kernel=kernel,
fixed_inputs=observation_index_points,
fixed_inputs_is_missing=observations_is_missing,
50 changes: 26 additions & 24 deletions tensorflow_probability/python/distributions/inflated_test.py
@@ -16,41 +16,42 @@
import numpy as np
import tensorflow.compat.v2 as tf

import tensorflow_probability as tfp

from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python import experimental as tfe
from tensorflow_probability.python.distributions import inflated
from tensorflow_probability.python.distributions import negative_binomial
from tensorflow_probability.python.distributions import normal
from tensorflow_probability.python.experimental import util
from tensorflow_probability.python.experimental.util import trainable
from tensorflow_probability.python.internal import test_util

tfe_util = tfp.experimental.util


class DistributionsTest(test_util.TestCase):

def test_inflated(self):
zinb = tfd.Inflated(
tfd.NegativeBinomial(5.0, probs=0.1), inflated_loc_probs=0.2)
zinb = inflated.Inflated(
negative_binomial.NegativeBinomial(5.0, probs=0.1),
inflated_loc_probs=0.2)
samples = zinb.sample(sample_shape=10, seed=test_util.test_seed())
self.assertEqual((10,), samples.shape)

spike_and_slab = tfd.Inflated(
tfd.Normal(loc=1.0, scale=2.0), inflated_loc_probs=0.5)
spike_and_slab = inflated.Inflated(
normal.Normal(loc=1.0, scale=2.0), inflated_loc_probs=0.5)
lprob = self.evaluate(spike_and_slab.log_prob(99.0))
self.assertLess(lprob, 0.0)

param_props = tfd.Inflated.parameter_properties(dtype=tf.float32)
param_props = inflated.Inflated.parameter_properties(dtype=tf.float32)
self.assertFalse(param_props['distribution'].is_tensor)
self.assertTrue(param_props['inflated_loc_logits'].is_preferred)
self.assertFalse(param_props['inflated_loc_probs'].is_preferred)
self.assertTrue(param_props['inflated_loc'].is_tensor)

def test_inflated_batched(self):
nb = tfd.NegativeBinomial(
nb = negative_binomial.NegativeBinomial(
total_count=np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32),
logits=np.array([-2.0, -1.0, 0.0, 1.0, 2.0], dtype=np.float32))
zinb = tfd.Inflated(
nb, inflated_loc_probs=np.array(
[0.2, 0.4, 0.6, 0.8, 1.0], dtype=np.float32))
zinb = inflated.Inflated(
nb,
inflated_loc_probs=np.array([0.2, 0.4, 0.6, 0.8, 1.0],
dtype=np.float32))

lprob = zinb.log_prob([0, 1, 2, 3, 4])
self.assertEqual((5,), lprob.shape)
@@ -59,24 +60,24 @@ def test_inflated_batched(self):
self.assertEqual((5,), samples.shape)

def test_inflated_factory(self):
spike_and_slab_class = tfe.distributions.inflated_factory(
'SpikeAndSlab', tfd.Normal, 0.0)
spike_and_slab_class = inflated.inflated_factory('SpikeAndSlab',
normal.Normal, 0.0)
spike_and_slab = spike_and_slab_class(
inflated_loc_probs=0.3, loc=5.0, scale=2.0)
spike_and_slab2 = tfd.Inflated(
tfd.Normal(loc=5.0, scale=2.0), inflated_loc_probs=0.3)
spike_and_slab2 = inflated.Inflated(
normal.Normal(loc=5.0, scale=2.0), inflated_loc_probs=0.3)
self.assertEqual(
self.evaluate(spike_and_slab.log_prob(7.0)),
self.evaluate(spike_and_slab2.log_prob(7.0)))

def test_zero_inflated_negative_binomial(self):
zinb = tfd.ZeroInflatedNegativeBinomial(
zinb = inflated.ZeroInflatedNegativeBinomial(
inflated_loc_probs=0.2, probs=0.5, total_count=10.0)
self.assertEqual('ZeroInflatedNegativeBinomial', zinb.name)

def test_zinb_is_trainable(self):
init_fn, apply_fn = tfe_util.make_trainable_stateless(
tfd.ZeroInflatedNegativeBinomial,
init_fn, apply_fn = trainable.make_trainable_stateless(
inflated.ZeroInflatedNegativeBinomial,
batch_and_event_shape=[5],
parameter_dtype=tf.float32)
init_obj = init_fn(seed=test_util.test_seed())
@@ -96,8 +97,9 @@ def test_zinb_is_trainable(self):
disable_numpy=True,
reason='Only TF has composite tensors')
def test_zinb_as_composite_tensor(self):
zinb = tfd.ZeroInflatedNegativeBinomial(0.1, total_count=10.0, probs=0.4)
comp_zinb = tfe.as_composite(zinb)
zinb = inflated.ZeroInflatedNegativeBinomial(
0.1, total_count=10.0, probs=0.4)
comp_zinb = util.as_composite(zinb)
unused_as_tensors = tf.nest.flatten(comp_zinb)


@@ -17,9 +17,9 @@
from scipy import stats
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import math as tfm
from tensorflow_probability.python.distributions import inverse_gaussian
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.math import gradient


def _scipy_invgauss(loc, concentration):
@@ -373,7 +373,7 @@ def testInverseGaussianSampleMultidimensionalVariance(self):
def testInverseGaussianFullyReparameterized(self):
concentration = tf.constant(4.0)
loc = tf.constant(3.0)
_, [grad_concentration, grad_loc] = tfm.value_and_gradient(
_, [grad_concentration, grad_loc] = gradient.value_and_gradient(
lambda a, b: inverse_gaussian.InverseGaussian(a, b, validate_args=True). # pylint: disable=g-long-lambda
sample(100, seed=test_util.test_seed()),
[concentration, loc])
@@ -393,7 +393,7 @@ def gen_samples(l, c):
2, seed=test_util.test_seed())

samples, [loc_grad, concentration_grad] = self.evaluate(
tfm.value_and_gradient(gen_samples, [loc, concentration]))
gradient.value_and_gradient(gen_samples, [loc, concentration]))
self.assertEqual(samples.shape, (2, 4, 3))
self.assertEqual(concentration_grad.shape, concentration.shape)
self.assertEqual(loc_grad.shape, loc.shape)
@@ -28,7 +28,8 @@
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.math import psd_kernels as tfpk
from tensorflow_probability.python.math.psd_kernels import positive_semidefinite_kernel as psd_kernel
from tensorflow_probability.python.math.psd_kernels import schur_complement as schur_complement_lib


__all__ = [
@@ -102,7 +103,7 @@ def _validate_observation_data(
index_point_count, observation_count))


class DampedSchurComplement(tfpk.AutoCompositeTensorPsdKernel):
class DampedSchurComplement(psd_kernel.AutoCompositeTensorPsdKernel):
"""Schur complement kernel, damped by scalar factors.
This kernel is the same as the SchurComplement kernel, except we multiply by
@@ -398,7 +399,7 @@ def __init__(
if _conditional_kernel is None:
_conditional_kernel = DampedSchurComplement(
df=df,
schur_complement=tfpk.SchurComplement(
schur_complement=schur_complement_lib.SchurComplement(
base_kernel=kernel,
fixed_inputs=self._observation_index_points,
diag_shift=observation_noise_variance),
@@ -606,7 +607,7 @@ def precompute_regression_model(

conditional_kernel = DampedSchurComplement(
df=df,
schur_complement=tfpk.SchurComplement(
schur_complement=schur_complement_lib.SchurComplement(
base_kernel=kernel,
fixed_inputs=observation_index_points,
diag_shift=observation_noise_variance),