diff --git a/attribution/integrated_gradients.py b/attribution/integrated_gradients.py
index 946ec326282f..13b350296848 100644
--- a/attribution/integrated_gradients.py
+++ b/attribution/integrated_gradients.py
@@ -289,7 +289,7 @@ def AddBOWIntegratedGradientsOps(graph,
   assert len(embedding_lookup.get_shape()) == 3
   assert len(embedding.get_shape()) == 2
   with graph.as_default():
-    num_evals = tf.placeholder_with_default(
+    num_evals = tf.compat.v1.placeholder_with_default(
         tf.constant(50, name='num_evals'), shape=())
     attribution_dims_map = {embedding: [1] for embedding in embedding_list}
     attribution_hooks = AddIntegratedGradientsOps(
diff --git a/attribution/integrated_gradients_test.py b/attribution/integrated_gradients_test.py
index 3d9c588674a1..377d7dcea79a 100644
--- a/attribution/integrated_gradients_test.py
+++ b/attribution/integrated_gradients_test.py
@@ -28,11 +28,12 @@ class AttributionTest(tf.test.TestCase):
 
   def testAddIntegratedGradientsOps(self):
     with tf.Graph().as_default() as graph:
-      var1 = tf.get_variable(name='var1', initializer=[[[1., 2., 3.]]])
+      var1 = tf.compat.v1.get_variable(
+          name='var1', initializer=[[[1., 2., 3.]]])
       input_tensor = tf.placeholder(shape=[None, None, 3], dtype=tf.float32)
       x = tf.multiply(input_tensor, [[[1.]]])
       var1_times_x = tf.multiply(var1, x)
-      var2 = tf.get_variable(
+      var2 = tf.compat.v1.get_variable(
           name='var2', initializer=[[4., 5.], [6., 7.], [4., 3.]])
       matmul = tf.einsum('ijk,kl->ijl', var1_times_x, var2)
       output_tensor = tf.reduce_sum(matmul, [1, 2], name='out')
diff --git a/bam/run.sh b/bam/run.sh
old mode 100644
new mode 100755
diff --git a/bitempered_loss/requirements.txt b/bitempered_loss/requirements.txt
index 4ecc2f7dd437..1d52d7a46c01 100644
--- a/bitempered_loss/requirements.txt
+++ b/bitempered_loss/requirements.txt
@@ -1,3 +1,3 @@
-tensorflow >=1.11.0
+tensorflow>=1.11.0,<2.0.0
 numpy>=1.13.1
-absl>=0.1.6
+absl-py>=0.1.6
diff --git a/depth_from_video_in_the_wild/run.sh b/depth_from_video_in_the_wild/run.sh
old mode 100644
new mode 100755
diff --git a/dql_grasping/requirements.txt b/dql_grasping/requirements.txt
index 3d4e4513066a..68310a917e6e 100644
--- a/dql_grasping/requirements.txt
+++ b/dql_grasping/requirements.txt
@@ -1,6 +1,6 @@
 absl-py>=0.5.0
 numpy>=1.13.3
-tensorflow>=1.11.0
+tensorflow>=1.11.0,<2.0.0
 gin-config
 pybullet
 Pillow==5.3.0
diff --git a/dvrl/run.sh b/dvrl/run.sh
old mode 100644
new mode 100755
diff --git a/evanet/requirements.txt b/evanet/requirements.txt
index b33e9ea24c1f..7a21f2ee9233 100644
--- a/evanet/requirements.txt
+++ b/evanet/requirements.txt
@@ -1,4 +1,4 @@
 absl-py>=0.5.0
 numpy>=1.13.3
-tensorflow>=1.11.0
+tensorflow>=1.11.0,<2.0.0
 protobuf>=3.9.0
diff --git a/explaining_risk_increase/requirements.txt b/explaining_risk_increase/requirements.txt
index 6f9706765e60..e7f3006d62ca 100644
--- a/explaining_risk_increase/requirements.txt
+++ b/explaining_risk_increase/requirements.txt
@@ -1 +1 @@
-tensorflow>=1.12.0
+tensorflow>=1.12.0,<2.0.0
diff --git a/fat/run.sh b/fat/run.sh
index 8fb0f57c7a4f..79eac1c87ee1 100755
--- a/fat/run.sh
+++ b/fat/run.sh
@@ -20,5 +20,5 @@
 virtualenv -p python3 .
 source ./bin/activate
 pip install tensorflow
-pip install -r fat_bert_nq/requirements.txt
+pip install -r fat/fat_bert_nq/requirements.txt
 python3 -m fat.fat_bert_nq.run_nq_test
diff --git a/genomics_ood/run.sh b/genomics_ood/run.sh
old mode 100644
new mode 100755
diff --git a/large_margin/margin_loss.py b/large_margin/margin_loss.py
index 03bcb7c94846..67f6780e28b9 100644
--- a/large_margin/margin_loss.py
+++ b/large_margin/margin_loss.py
@@ -88,7 +88,7 @@ def large_margin(  # pylint: disable=invalid-name
     use_approximation=True,
     worst_case_loss=True,
     layers_weights=None,
-    loss_collection=tf.GraphKeys.LOSSES):
+    loss_collection=tf.compat.v1.GraphKeys.LOSSES):
   """Creates a large margin loss.
 
   Args:
diff --git a/m_theory/dim4/so8_supergravity_extrema/code/extrema.py b/m_theory/dim4/so8_supergravity_extrema/code/extrema.py
index 4a76ac59a793..977dfc6baabe 100644
--- a/m_theory/dim4/so8_supergravity_extrema/code/extrema.py
+++ b/m_theory/dim4/so8_supergravity_extrema/code/extrema.py
@@ -26,11 +26,6 @@
 import pprint
 import time
 
-# Setting up `mpmath` global default precision at initialization time.
-# Uses value from the environment variable `MPMATH_DPS`, or 100 if unset.
-mpmath.mp.dps = int(os.getenv('MPMATH_DPS', '100'))
-
-
 from dim4.so8_supergravity_extrema.code import distillation
 from dim4.so8_supergravity_extrema.code import scalar_sector_mpmath
 from dim4.so8_supergravity_extrema.code import scalar_sector_tensorflow
@@ -39,6 +34,10 @@
 import mpmath
 import numpy
 
+# Setting up `mpmath` global default precision at initialization time.
+# Uses value from the environment variable `MPMATH_DPS`, or 100 if unset.
+mpmath.mp.dps = int(os.getenv('MPMATH_DPS', '100'))
+
 
 def scan_for_solutions(seed, scale, num_iterations, output_basename):
   """Scans for critical points (with TensorFlow)."""
diff --git a/psycholab/run.sh b/psycholab/run.sh
old mode 100644
new mode 100755
diff --git a/rllim/run.sh b/rllim/run.sh
old mode 100644
new mode 100755
diff --git a/robust_loss/adaptive.py b/robust_loss/adaptive.py
index e96b79584431..f06ce5f17be8 100644
--- a/robust_loss/adaptive.py
+++ b/robust_loss/adaptive.py
@@ -58,7 +58,7 @@ def _construct_scale(x, scale_lo, scale_init, float_dtype):
   else:
     # Otherwise we construct a "latent" scale variable and define `scale`
     # As an affine function of a softplus on that latent variable.
-    latent_scale = tf.get_variable(
+    latent_scale = tf.compat.v1.get_variable(
         'LatentScale', initializer=tf.zeros((1, x.shape[1]), float_dtype))
     scale = util.affine_softplus(latent_scale, lo=scale_lo, ref=scale_init)
   return scale
@@ -176,7 +176,7 @@ def lossfun(x,
       alpha_init = (alpha_lo + alpha_hi) / 2.
       latent_alpha_init = util.inv_affine_sigmoid(
           alpha_init, lo=alpha_lo, hi=alpha_hi)
-      latent_alpha = tf.get_variable(
+      latent_alpha = tf.compat.v1.get_variable(
           'LatentAlpha',
           initializer=tf.fill((1, x.shape[1]),
                               tf.cast(latent_alpha_init, dtype=float_dtype)))
@@ -233,7 +233,7 @@ def lossfun_students(x, scale_lo=1e-5, scale_init=1.):
   float_dtype = x.dtype
   assert_ops = [tf.Assert(tf.equal(tf.rank(x), 2), [tf.rank(x)])]
   with tf.control_dependencies(assert_ops):
-    log_df = tf.get_variable(
+    log_df = tf.compat.v1.get_variable(
         name='LogDf', initializer=tf.zeros((1, x.shape[1]), float_dtype))
     scale = _construct_scale(x, scale_lo, scale_init, float_dtype)
     loss = util.students_t_nll(x, tf.math.exp(log_df), scale)
diff --git a/robust_loss/adaptive_test.py b/robust_loss/adaptive_test.py
index e36bcc62df96..0d4fd61ac135 100644
--- a/robust_loss/adaptive_test.py
+++ b/robust_loss/adaptive_test.py
@@ -150,7 +150,7 @@ def testInitialAlphaAndScaleAreCorrect(self, float_dtype):
       true_alpha_init = (alpha_lo + alpha_hi) / 2.
      scale_init = float_dtype(np.random.uniform() + 0.5)
       scale_lo = float_dtype(np.random.uniform() * 0.1)
-      with tf.variable_scope('trial_' + str(i)):
+      with tf.compat.v1.variable_scope('trial_' + str(i)):
         _, alpha, scale = adaptive.lossfun(
             tf.constant(np.zeros((10, 10), float_dtype)),
             alpha_lo=alpha_lo,
@@ -179,7 +179,7 @@ def testFixedAlphaAndScaleAreCorrect(self, float_dtype):
       mu = tf.Variable(
           tf.zeros(tf.shape(samples)[1], float_dtype), name='DummyMu')
       x = samples - mu[tf.newaxis, :]
-      with tf.variable_scope('trial_' + str(i)):
+      with tf.compat.v1.variable_scope('trial_' + str(i)):
         loss, alpha, scale = adaptive.lossfun(
             x,
             alpha_lo=alpha_lo,
diff --git a/robust_loss/wavelet.py b/robust_loss/wavelet.py
index fe391ff77673..5a6f7871d2c2 100644
--- a/robust_loss/wavelet.py
+++ b/robust_loss/wavelet.py
@@ -145,7 +145,7 @@ def pad_reflecting(x, padding_below, padding_above, axis):
   i = tf.range(-padding_below, n + padding_above, dtype=tf.int32)
   # `j` contains the indices of the input tensor corresponding to the output
   # padded tensor.
-  i_mod = tf.mod(i, tf.maximum(1, 2 * (n - 1)))
+  i_mod = tf.math.mod(i, tf.maximum(1, 2 * (n - 1)))
   j = tf.minimum(2 * (n - 1) - i_mod, i_mod)
   return tf.gather(x, j, axis=axis)
 
diff --git a/sm3/run.sh b/sm3/run.sh
old mode 100644
new mode 100755
diff --git a/soft_sort/run.sh b/soft_sort/run.sh
old mode 100644
new mode 100755
diff --git a/tabnet/run.sh b/tabnet/run.sh
old mode 100644
new mode 100755
diff --git a/towards_gan_benchmarks/requirements.txt b/towards_gan_benchmarks/requirements.txt
index 189657f6f1a1..31556ad92ecd 100644
--- a/towards_gan_benchmarks/requirements.txt
+++ b/towards_gan_benchmarks/requirements.txt
@@ -1,4 +1,4 @@
 absl-py>=0.7.0
 numpy>=1.15.2
 scipy>=1.2.0
-tensorflow>=1.10.0
\ No newline at end of file
+tensorflow>=1.10.0,<2.0.0
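Note on the recurring substitution above: tf.get_variable, tf.variable_scope, tf.placeholder_with_default, tf.GraphKeys, and tf.mod were removed as top-level names in TensorFlow 2.x, while the tf.compat.v1 aliases resolve both in recent 1.x releases (roughly 1.13 onward, an assumption worth checking against the pinned lower bounds) and in 2.x graph mode. A minimal sketch of the pattern, with the variable name 'w' and all shapes purely illustrative rather than taken from the patch:

    import tensorflow as tf

    with tf.Graph().as_default():
      # The compat.v1 aliases behave like the old TF1 top-level names
      # when building a graph, under both TF 1.x and TF 2.x.
      w = tf.compat.v1.get_variable('w', initializer=tf.zeros((1, 3)))
      n = tf.compat.v1.placeholder_with_default(tf.constant(50), shape=())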