Commit 541ef81
add +x bit to run.sh, change tf -> tf.compat.v1, freeze requirements for tf.contrib

PiperOrigin-RevId: 276586556
andrewluchen authored and copybara-github committed Oct 24, 2019
1 parent 8db04d0 commit 541ef81
Showing 22 changed files with 22 additions and 22 deletions.
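The Python edits below all follow one pattern: each TF1-only symbol is pointed at its tf.compat.v1 home, where it keeps resolving even after the default tf namespace drops the old name. A minimal sketch of the pattern, assuming a TensorFlow 1.x runtime recent enough to ship tf.compat.v1 (the placeholder and session code here are illustrative, not from the repository):

    import tensorflow as tf

    with tf.Graph().as_default():
        # Before this commit: x = tf.placeholder(tf.float32, shape=[None, 3])
        # After: the same op under its forward-compatible name.
        x = tf.compat.v1.placeholder(tf.float32, shape=[None, 3])
        y = tf.reduce_sum(x)
        with tf.compat.v1.Session() as sess:
            print(sess.run(y, feed_dict={x: [[1., 2., 3.]]}))  # 6.0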
2 changes: 1 addition & 1 deletion attribution/integrated_gradients.py
@@ -289,7 +289,7 @@ def AddBOWIntegratedGradientsOps(graph,
   assert len(embedding_lookup.get_shape()) == 3
   assert len(embedding.get_shape()) == 2
   with graph.as_default():
-    num_evals = tf.placeholder_with_default(
+    num_evals = tf.compat.v1.placeholder_with_default(
         tf.constant(50, name='num_evals'), shape=())
     attribution_dims_map = {embedding: [1] for embedding in embedding_list}
     attribution_hooks = AddIntegratedGradientsOps(
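placeholder_with_default keeps num_evals overridable at session time while falling back to the constant 50. A minimal usage sketch under a TF 1.x-style graph and session (the session code is illustrative, not from this repository):

    import tensorflow as tf

    graph = tf.Graph()
    with graph.as_default():
        num_evals = tf.compat.v1.placeholder_with_default(
            tf.constant(50, name='num_evals'), shape=())
        total = num_evals * 2

    with tf.compat.v1.Session(graph=graph) as sess:
        print(sess.run(total))                            # 100: default of 50 is used
        print(sess.run(total, feed_dict={num_evals: 5}))  # 10: the fed value wins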
5 changes: 3 additions & 2 deletions attribution/integrated_gradients_test.py
@@ -28,11 +28,12 @@ class AttributionTest(tf.test.TestCase):

   def testAddIntegratedGradientsOps(self):
     with tf.Graph().as_default() as graph:
-      var1 = tf.get_variable(name='var1', initializer=[[[1., 2., 3.]]])
+      var1 = tf.compat.v1.get_variable(
+          name='var1', initializer=[[[1., 2., 3.]]])
       input_tensor = tf.placeholder(shape=[None, None, 3], dtype=tf.float32)
       x = tf.multiply(input_tensor, [[[1.]]])
       var1_times_x = tf.multiply(var1, x)
-      var2 = tf.get_variable(
+      var2 = tf.compat.v1.get_variable(
           name='var2', initializer=[[4., 5.], [6., 7.], [4., 3.]])
       matmul = tf.einsum('ijk,kl->ijl', var1_times_x, var2)
       output_tensor = tf.reduce_sum(matmul, [1, 2], name='out')
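tf.compat.v1.get_variable infers shape and dtype from a concrete initializer, which is why the test can pass nested lists directly. A minimal sketch of that form (variable names illustrative):

    import tensorflow as tf

    with tf.Graph().as_default():
        # A concrete initializer fixes shape (1, 1, 3) and dtype float32.
        var1 = tf.compat.v1.get_variable(
            name='var1', initializer=[[[1., 2., 3.]]])
        init_op = tf.compat.v1.global_variables_initializer()
        with tf.compat.v1.Session() as sess:
            sess.run(init_op)
            print(sess.run(var1))  # [[[1. 2. 3.]]]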
bam/run.sh: mode changed 100644 → 100755 (executable bit added; no content changes)
4 changes: 2 additions & 2 deletions bitempered_loss/requirements.txt
@@ -1,3 +1,3 @@
-tensorflow >=1.11.0
+tensorflow>=1.11.0,<2.0.0
 numpy>=1.13.1
-absl>=0.1.6
+absl-py>=0.1.6
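The new upper bound is the "freeze requirements for tf.contrib" part of the commit message: tf.contrib was removed in TensorFlow 2.0, so projects importing it must stay on a 1.x release. A hypothetical fail-fast guard, not part of this commit, that would surface the constraint at import time:

    import tensorflow as tf

    # tf.contrib exists only in TensorFlow 1.x; 2.x removed it outright.
    if not hasattr(tf, 'contrib'):
        raise ImportError(
            'This project relies on tf.contrib, which was removed in '
            'TensorFlow 2.x; install tensorflow>=1.11.0,<2.0.0 instead.')

The same hunk also corrects a package name: the PyPI distribution is absl-py, not absl.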
depth_from_video_in_the_wild/run.sh: mode changed 100644 → 100755 (executable bit added; no content changes)
2 changes: 1 addition & 1 deletion dql_grasping/requirements.txt
@@ -1,6 +1,6 @@
 absl-py>=0.5.0
 numpy>=1.13.3
-tensorflow>=1.11.0
+tensorflow>=1.11.0,<2.0.0
 gin-config
 pybullet
 Pillow==5.3.0
dvrl/run.sh: mode changed 100644 → 100755 (executable bit added; no content changes)
2 changes: 1 addition & 1 deletion evanet/requirements.txt
@@ -1,4 +1,4 @@
 absl-py>=0.5.0
 numpy>=1.13.3
-tensorflow>=1.11.0
+tensorflow>=1.11.0,<2.0.0
 protobuf>=3.9.0
2 changes: 1 addition & 1 deletion explaining_risk_increase/requirements.txt
@@ -1 +1 @@
-tensorflow>=1.12.0
+tensorflow>=1.12.0,<2.0.0
2 changes: 1 addition & 1 deletion fat/run.sh
@@ -20,5 +20,5 @@ virtualenv -p python3 .
 source ./bin/activate

 pip install tensorflow
-pip install -r fat_bert_nq/requirements.txt
+pip install -r fat/fat_bert_nq/requirements.txt
 python3 -m fat.fat_bert_nq.run_nq_test
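The path fix matches how the script is invoked: the test runs as python3 -m fat.fat_bert_nq.run_nq_test from the repository root, so the requirements file must likewise be addressed from the root as fat/fat_bert_nq/requirements.txt rather than relative to the fat/ directory.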
genomics_ood/run.sh: mode changed 100644 → 100755 (executable bit added; no content changes)
2 changes: 1 addition & 1 deletion large_margin/margin_loss.py
@@ -88,7 +88,7 @@ def large_margin( # pylint: disable=invalid-name
     use_approximation=True,
     worst_case_loss=True,
     layers_weights=None,
-    loss_collection=tf.GraphKeys.LOSSES):
+    loss_collection=tf.compat.v1.GraphKeys.LOSSES):
   """Creates a large margin loss.

   Args:
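GraphKeys.LOSSES names the TF1 graph collection that loss ops are registered into so they can be gathered later with get_collection. A minimal register-then-collect sketch, assuming a TF 1.x-style graph (names illustrative):

    import tensorflow as tf

    with tf.Graph().as_default():
        loss = tf.constant(0.25, name='margin_loss')
        # Register the loss, then recover everything in the collection at once.
        tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.LOSSES, loss)
        losses = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.LOSSES)
        total = tf.add_n(losses)
        with tf.compat.v1.Session() as sess:
            print(sess.run(total))  # 0.25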
9 changes: 4 additions & 5 deletions m_theory/dim4/so8_supergravity_extrema/code/extrema.py
@@ -26,11 +26,6 @@
 import pprint
 import time

-# Setting up `mpmath` global default precision at initialization time.
-# Uses value from the environment variable `MPMATH_DPS`, or 100 if unset.
-mpmath.mp.dps = int(os.getenv('MPMATH_DPS', '100'))
-
-
 from dim4.so8_supergravity_extrema.code import distillation
 from dim4.so8_supergravity_extrema.code import scalar_sector_mpmath
 from dim4.so8_supergravity_extrema.code import scalar_sector_tensorflow
@@ -39,6 +34,10 @@
 import mpmath
 import numpy

+# Setting up `mpmath` global default precision at initialization time.
+# Uses value from the environment variable `MPMATH_DPS`, or 100 if unset.
+mpmath.mp.dps = int(os.getenv('MPMATH_DPS', '100'))
+

 def scan_for_solutions(seed, scale, num_iterations, output_basename):
   """Scans for critical points (with TensorFlow)."""
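Beyond moving lines, this pair of hunks fixes an ordering bug that the diff makes visible: the mpmath.mp.dps assignment previously ran before the import mpmath statement below it, which would fail with a NameError at module load; it now runs after the imports it depends on. For reference, dps controls mpmath's working precision in decimal digits:

    import mpmath

    # `dps` is the number of significant decimal digits mpmath computes with.
    mpmath.mp.dps = 50
    print(mpmath.sqrt(2))  # 1.4142135623730950488016887242096980785696718753769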
psycholab/run.sh: mode changed 100644 → 100755 (executable bit added; no content changes)
rllim/run.sh: mode changed 100644 → 100755 (executable bit added; no content changes)
6 changes: 3 additions & 3 deletions robust_loss/adaptive.py
@@ -58,7 +58,7 @@ def _construct_scale(x, scale_lo, scale_init, float_dtype):
   else:
     # Otherwise we construct a "latent" scale variable and define `scale`
     # As an affine function of a softplus on that latent variable.
-    latent_scale = tf.get_variable(
+    latent_scale = tf.compat.v1.get_variable(
         'LatentScale', initializer=tf.zeros((1, x.shape[1]), float_dtype))
     scale = util.affine_softplus(latent_scale, lo=scale_lo, ref=scale_init)
   return scale
@@ -176,7 +176,7 @@ def lossfun(x,
     alpha_init = (alpha_lo + alpha_hi) / 2.
     latent_alpha_init = util.inv_affine_sigmoid(
         alpha_init, lo=alpha_lo, hi=alpha_hi)
-    latent_alpha = tf.get_variable(
+    latent_alpha = tf.compat.v1.get_variable(
         'LatentAlpha',
         initializer=tf.fill((1, x.shape[1]),
                             tf.cast(latent_alpha_init, dtype=float_dtype)))
@@ -233,7 +233,7 @@ def lossfun_students(x, scale_lo=1e-5, scale_init=1.):
   float_dtype = x.dtype
   assert_ops = [tf.Assert(tf.equal(tf.rank(x), 2), [tf.rank(x)])]
   with tf.control_dependencies(assert_ops):
-    log_df = tf.get_variable(
+    log_df = tf.compat.v1.get_variable(
         name='LogDf', initializer=tf.zeros((1, x.shape[1]), float_dtype))
     scale = _construct_scale(x, scale_lo, scale_init, float_dtype)
     loss = util.students_t_nll(x, tf.math.exp(log_df), scale)
4 changes: 2 additions & 2 deletions robust_loss/adaptive_test.py
@@ -150,7 +150,7 @@ def testInitialAlphaAndScaleAreCorrect(self, float_dtype):
       true_alpha_init = (alpha_lo + alpha_hi) / 2.
       scale_init = float_dtype(np.random.uniform() + 0.5)
       scale_lo = float_dtype(np.random.uniform() * 0.1)
-      with tf.variable_scope('trial_' + str(i)):
+      with tf.compat.v1.variable_scope('trial_' + str(i)):
         _, alpha, scale = adaptive.lossfun(
             tf.constant(np.zeros((10, 10), float_dtype)),
             alpha_lo=alpha_lo,
@@ -179,7 +179,7 @@ def testFixedAlphaAndScaleAreCorrect(self, float_dtype):
       mu = tf.Variable(
           tf.zeros(tf.shape(samples)[1], float_dtype), name='DummyMu')
       x = samples - mu[tf.newaxis, :]
-      with tf.variable_scope('trial_' + str(i)):
+      with tf.compat.v1.variable_scope('trial_' + str(i)):
         loss, alpha, scale = adaptive.lossfun(
             x,
             alpha_lo=alpha_lo,
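Wrapping each trial in tf.compat.v1.variable_scope gives every loop iteration its own namespace, so the variables that lossfun creates internally via get_variable do not collide across trials. A minimal sketch of the scoping behavior (loop and names illustrative):

    import tensorflow as tf

    with tf.Graph().as_default():
        for i in range(2):
            with tf.compat.v1.variable_scope('trial_' + str(i)):
                v = tf.compat.v1.get_variable('LatentAlpha', initializer=[0.0])
                print(v.name)  # trial_0/LatentAlpha:0, then trial_1/LatentAlpha:0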
2 changes: 1 addition & 1 deletion robust_loss/wavelet.py
@@ -145,7 +145,7 @@ def pad_reflecting(x, padding_below, padding_above, axis):
   i = tf.range(-padding_below, n + padding_above, dtype=tf.int32)
   # `j` contains the indices of the input tensor corresponding to the output
   # padded tensor.
-  i_mod = tf.mod(i, tf.maximum(1, 2 * (n - 1)))
+  i_mod = tf.math.mod(i, tf.maximum(1, 2 * (n - 1)))
   j = tf.minimum(2 * (n - 1) - i_mod, i_mod)
   return tf.gather(x, j, axis=axis)

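The rename preserves an important semantic detail: pad_reflecting feeds negative indices into the modulo (i starts at -padding_below), and tf.math.mod, like the old tf.mod alias it replaces, follows Python's floor semantics rather than C's truncation, so negative values wrap back into the valid index range. A small check (illustrative):

    import tensorflow as tf

    with tf.Graph().as_default():
        i = tf.constant([-2, -1, 0, 5])
        wrapped = tf.math.mod(i, 4)
        with tf.compat.v1.Session() as sess:
            print(sess.run(wrapped))  # [2 3 0 1] -- floor-mod, so negatives wrap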
sm3/run.sh: mode changed 100644 → 100755 (executable bit added; no content changes)
soft_sort/run.sh: mode changed 100644 → 100755 (executable bit added; no content changes)
tabnet/run.sh: mode changed 100644 → 100755 (executable bit added; no content changes)
2 changes: 1 addition & 1 deletion towards_gan_benchmarks/requirements.txt
@@ -1,4 +1,4 @@
 absl-py>=0.7.0
 numpy>=1.15.2
 scipy>=1.2.0
-tensorflow>=1.10.0
+tensorflow>=1.10.0,<2.0.0
