remove unnecessary floating point casts in examples
dustinvtran committed Mar 19, 2017
1 parent 3deaa31 commit e48aa80
Showing 24 changed files with 80 additions and 86 deletions.
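The pattern is the same throughout: the example scripts and tutorial listings were forcing toy data to float32 on the NumPy side (.astype(np.float32), dtype=np.float32, dtype='float32') even though np.zeros, np.linspace, and np.random.* default to float64, so the casts bought nothing. This commit drops them and, in the few places where a float32 tensor is genuinely required, moves the conversion into the TensorFlow graph instead (see the tf.cast and tf.placeholder changes below). A minimal sketch of the adopted pattern, written for this summary rather than taken from the diff and assuming the Edward 1.x / TensorFlow 1.x APIs these examples use:

# Illustrative sketch (not from the diff): keep toy data in NumPy's
# default float64 and cast once at the graph boundary.
import numpy as np
import tensorflow as tf

x_data = np.random.randn(50, 3)   # float64 by default
x = tf.cast(x_data, tf.float32)   # single cast inside the TF graph
# ...build the float32 model on x, as the examples below do...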
docs/tex/iclr2017.tex (6 additions & 6 deletions)
@@ -111,7 +111,7 @@ \subsubsection{Section 4. Compositional Representations for Inference}
import tensorflow as tf
from edward.models import Categorical, Normal

- x_train = np.zeros([N, D], dtype=np.float32)
+ x_train = np.zeros([N, D])

qbeta = Normal(mu=tf.Variable(tf.zeros([K, D])),
sigma=tf.exp(tf.Variable(tf.zeros([K, D]))))
@@ -128,7 +128,7 @@ \subsubsection{Section 4. Compositional Representations for Inference}
import tensorflow as tf
from edward.models import Empirical

- x_train = np.zeros([N, D], dtype=np.float32)
+ x_train = np.zeros([N, D])

T = 10000 # number of samples
qbeta = Empirical(params=tf.Variable(tf.zeros([T, K, D])))
@@ -158,7 +158,7 @@ \subsubsection{Section 4. Compositional Representations for Inference}
return Dense(1, activation=None)(h)

# DATA
- x_train = np.zeros([N, 28 * 28], dtype=np.float32)
+ x_train = np.zeros([N, 28 * 28])

# MODEL
eps = Normal(mu=tf.zeros([N, d]), sigma=tf.ones([N, d]))
@@ -181,7 +181,7 @@ \subsubsection{Section 4. Compositional Representations for Inference}
from edward.models import Categorical, PointMass

# DATA
- x_train = np.zeros([N, D], dtype=np.float32)
+ x_train = np.zeros([N, D])

# INFERENCE
qbeta = PointMass(params=tf.Variable(tf.zeros([K, D])))
@@ -253,8 +253,8 @@ \subsubsection{Section 5. Experiments}
T = 100 # number of empirical samples

# DATA
- x_data = np.zeros([N, D], dtype=np.float32)
- y_data = np.zeros([N], dtype=np.float32)
+ x_data = np.zeros([N, D])
+ y_data = np.zeros([N])

# MODEL
x = tf.Variable(x_data, trainable=False)
docs/tex/tutorials/mixture-density-network.tex (2 additions & 2 deletions)
@@ -21,8 +21,8 @@ \subsubsection{Data}
from sklearn.model_selection import train_test_split

def build_toy_dataset(N):
- y_data = np.random.uniform(-10.5, 10.5, N).astype(np.float32)
- r_data = np.random.normal(size=N).astype(np.float32)  # random noise
+ y_data = np.random.uniform(-10.5, 10.5, N)
+ r_data = np.random.normal(size=N)  # random noise
x_data = np.sin(0.75 * y_data) * 7.0 + y_data * 0.5 + r_data * 1.0
x_data = x_data.reshape((N, 1))
return train_test_split(x_data, y_data, random_state=42)
docs/tex/tutorials/supervised-classification.tex (1 addition & 1 deletion)
@@ -16,7 +16,7 @@ \subsubsection{Data}
{https://stat.ethz.ch/R-manual/R-devel/library/MASS/html/crabs.html}
{crabs data set}.
\begin{lstlisting}[language=Python]
- df = np.loadtxt('data/crabs_train.txt', dtype='float32', delimiter=',')
+ df = np.loadtxt('data/crabs_train.txt', delimiter=',')
df[df[:, 0] == -1, 0] = 0 # replace -1 label with 0 label

N = 25 # number of data points
docs/tex/tutorials/supervised-regression.tex (2 additions & 2 deletions)
@@ -20,7 +20,7 @@ \subsubsection{Data}
\begin{lstlisting}[language=Python]
def build_toy_dataset(N, w, noise_std=0.1):
D = len(w)
- x = np.random.randn(N, D).astype(np.float32)
+ x = np.random.randn(N, D)
y = np.dot(x, w) + np.random.normal(0, noise_std, size=N)
return x, y

@@ -135,7 +135,7 @@ \subsubsection{Criticism}
w_samples = w.sample(n_samples).eval()
b_samples = b.sample(n_samples).eval()
plt.scatter(X_data, y_data)
- inputs = np.linspace(-1, 10, num=400, dtype=np.float32)
+ inputs = np.linspace(-1, 10, num=400)
for ns in range(n_samples):
output = inputs * w_samples[ns] + b_samples[ns]
plt.plot(inputs, output)
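A side note on why dropping the cast is safe in this Criticism listing: w.sample(n_samples).eval() and b.sample(n_samples).eval() return NumPy arrays, so the plotting loop above runs entirely in NumPy and never hands inputs to TensorFlow. A two-line illustration, assuming the single-feature setup that the 1-D scatter plot implies:

# Illustration only, not part of the tutorial.
inputs = np.linspace(-1, 10, num=400)           # float64, shape (400,)
output = inputs * w_samples[0] + b_samples[0]   # pure NumPy broadcasting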
docs/tex/tutorials/unsupervised.tex (1 addition & 1 deletion)
@@ -18,7 +18,7 @@ \subsubsection{Data}
pi = np.array([0.4, 0.6])
mus = [[1, 1], [-1, -1]]
stds = [[0.1, 0.1], [0.1, 0.1]]
- x = np.zeros((N, 2), dtype=np.float32)
+ x = np.zeros((N, 2))
for n in range(N):
k = np.argmax(np.random.multinomial(1, pi))
x[n, :] = np.random.multivariate_normal(mus[k], np.diag(stds[k]))
examples/bayesian_linear_regression_implicitklqp.py (1 addition & 1 deletion)
@@ -30,7 +30,7 @@

def build_toy_dataset(N, w, noise_std=0.1):
D = len(w)
- x = np.random.randn(N, D).astype(np.float32)
+ x = np.random.randn(N, D)
y = np.dot(x, w) + np.random.normal(0, noise_std, size=N)
return x, y

examples/bayesian_linear_regression_ppc.py (3 additions & 4 deletions)
@@ -23,8 +23,7 @@ def build_toy_dataset(N, noise_std=0.5):
X = np.concatenate([np.linspace(0, 2, num=N / 2),
np.linspace(6, 8, num=N / 2)])
y = 2.0 * X + 10 * np.random.normal(0, noise_std, size=N)
- X = X.astype(np.float32).reshape((N, 1))
- y = y.astype(np.float32)
+ X = X.reshape((N, 1))
return X, y


@@ -68,7 +67,7 @@ def build_toy_dataset(N, noise_std=0.5):

plt.scatter(X_train, y_train)

- inputs = np.linspace(-1, 10, num=400, dtype=np.float32)
+ inputs = np.linspace(-1, 10, num=400)
for ns in range(n_prior_samples):
output = inputs * w_prior[ns] + b_prior[ns]
plt.plot(inputs, output)
@@ -83,7 +82,7 @@ def build_toy_dataset(N, noise_std=0.5):

plt.scatter(X_train, y_train)

- inputs = np.linspace(-1, 10, num=400, dtype=np.float32)
+ inputs = np.linspace(-1, 10, num=400)
for ns in range(n_posterior_samples):
output = inputs * w_post[ns] + b_post[ns]
plt.plot(inputs, output)
examples/bayesian_linear_regression_sghmc.py (3 additions & 4 deletions)
@@ -24,8 +24,7 @@ def build_toy_dataset(N, noise_std=0.5):
X = np.concatenate([np.linspace(0, 2, num=N / 2),
np.linspace(6, 8, num=N / 2)])
y = 2.0 * X + 10 * np.random.normal(0, noise_std, size=N)
- X = X.astype(np.float32).reshape((N, 1))
- y = y.astype(np.float32)
+ X = X.reshape((N, 1))
return X, y


@@ -78,7 +77,7 @@ def build_toy_dataset(N, noise_std=0.5):

plt.scatter(X_train, y_train)

- inputs = np.linspace(-1, 10, num=400, dtype=np.float32)
+ inputs = np.linspace(-1, 10, num=400)
for ns in range(n_prior_samples):
output = inputs * w_prior[ns] + b_prior[ns]
plt.plot(inputs, output)
@@ -93,7 +92,7 @@ def build_toy_dataset(N, noise_std=0.5):

plt.scatter(X_train, y_train)

- inputs = np.linspace(-1, 10, num=400, dtype=np.float32)
+ inputs = np.linspace(-1, 10, num=400)
for ns in range(n_posterior_samples):
output = inputs * w_post[ns] + b_post[ns]
plt.plot(inputs, output)
examples/bayesian_linear_regression_tensor.py (2 additions & 3 deletions)
@@ -24,8 +24,7 @@ def build_toy_dataset(N, noise_std=0.1):
X = np.concatenate([np.linspace(0, 2, num=N / 2),
np.linspace(6, 8, num=N / 2)])
y = 5.0 * X + np.random.normal(0, noise_std, size=N)
- X = X.astype(np.float32).reshape((N, 1))
- y = y.astype(np.float32)
+ X = X.reshape((N, 1))
return X, y


@@ -38,7 +37,7 @@ def build_toy_dataset(N, noise_std=0.1):
X_data, y_data = build_toy_dataset(N)

# MODEL
- X = X_data
+ X = tf.cast(X_data, tf.float32)
w = Normal(mu=tf.zeros(D), sigma=tf.ones(D))
b = Normal(mu=tf.zeros(1), sigma=tf.ones(1))
y = Normal(mu=ed.dot(X, w) + b, sigma=tf.ones(N))
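Here the data tensor is baked directly into the graph rather than fed through data={...}, so the now-float64 NumPy array does need one explicit cast to match the float32 model; that is what the tf.cast change above does. A small illustration of the dtype behavior, written for this note rather than taken from the file:

# Illustration only; assumes the script's existing numpy/tensorflow imports.
X_data = np.zeros((40, 1))        # NumPy default dtype: float64
X = tf.cast(X_data, tf.float32)   # matches the float32 Normal/ed.dot model above
print(X.dtype)                    # <dtype: 'float32'>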
examples/bayesian_nn.py (11 additions & 12 deletions)
@@ -22,17 +22,16 @@

def build_toy_dataset(N=40, noise_std=0.1):
D = 1
- x = np.concatenate([np.linspace(0, 2, num=N / 2),
+ X = np.concatenate([np.linspace(0, 2, num=N / 2),
np.linspace(6, 8, num=N / 2)])
- y = np.cos(x) + np.random.normal(0, noise_std, size=N)
- x = (x - 4.0) / 4.0
- x = x.astype(np.float32).reshape((N, D))
- y = y.astype(np.float32)
- return x, y
+ y = np.cos(X) + np.random.normal(0, noise_std, size=N)
+ X = (X - 4.0) / 4.0
+ X = X.reshape((N, D))
+ return X, y


- def neural_network(x):
- h = tf.tanh(tf.matmul(x, W_0) + b_0)
+ def neural_network(X):
+ h = tf.tanh(tf.matmul(X, W_0) + b_0)
h = tf.tanh(tf.matmul(h, W_1) + b_1)
h = tf.matmul(h, W_2) + b_2
return tf.reshape(h, [-1])
@@ -44,7 +43,7 @@ def neural_network(x):
D = 1 # number of features

# DATA
- x_train, y_train = build_toy_dataset(N)
+ X_train, y_train = build_toy_dataset(N)

# MODEL
W_0 = Normal(mu=tf.zeros([D, 10]), sigma=tf.ones([D, 10]))
@@ -54,8 +53,8 @@ def neural_network(x):
b_1 = Normal(mu=tf.zeros(10), sigma=tf.ones(10))
b_2 = Normal(mu=tf.zeros(1), sigma=tf.ones(1))

- x = x_train
- y = Normal(mu=neural_network(x), sigma=0.1 * tf.ones(N))
+ X = tf.placeholder(tf.float32, [N, D])
+ y = Normal(mu=neural_network(X), sigma=0.1 * tf.ones(N))

# INFERENCE
qW_0 = Normal(mu=tf.Variable(tf.random_normal([D, 10])),
@@ -73,5 +72,5 @@ def neural_network(x):

inference = ed.KLqp({W_0: qW_0, b_0: qb_0,
W_1: qW_1, b_1: qb_1,
- W_2: qW_2, b_2: qb_2}, data={y: y_train})
+ W_2: qW_2, b_2: qb_2}, data={X: X_train, y: y_train})
inference.run()
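The larger rewrite in this file swaps the baked-in x = x_train tensor for a float32 placeholder that inference feeds, so the float64 training array is converted when it is fed rather than cast up front. A hedged sketch of how the fitted model could then be evaluated through the same placeholder; it is not part of the commit and leans on ed.copy as used in Edward's criticism tutorials, plus the names defined in bayesian_nn.py above:

# Sketch only (not in the commit); relies on names defined in bayesian_nn.py.
y_post = ed.copy(y, {W_0: qW_0, b_0: qb_0,
                     W_1: qW_1, b_1: qb_1,
                     W_2: qW_2, b_2: qb_2})
sess = ed.get_session()
# Feed the placeholder exactly as inference did to draw one
# posterior-predictive sample of the network outputs.
preds = sess.run(y_post, feed_dict={X: X_train})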
examples/dirichlet_categorical.py (11 additions & 18 deletions)
@@ -16,26 +16,19 @@
N = 1000
K = 4

- # Data generation
- alpha = np.array([20., 30., 10., 10.])
- pi = np.random.dirichlet(alpha).astype(np.float32)
- zn_data = np.array([np.random.choice(K, 1, p=pi)[0] for n in range(N)])
- print('pi={}'.format(pi))
-
- # Prior definition
- alpha_prior = tf.Variable(np.array([1., 1., 1., 1.]),
- dtype=tf.float32, trainable=False)
-
- # Posterior inference
- # Probabilistic model
- pi = Dirichlet(alpha=alpha_prior)
- zn = Categorical(p=tf.ones([N, 1]) * pi)
-
- # Variational model
+ # DATA
+ pi_true = np.random.dirichlet(np.array([20.0, 30.0, 10.0, 10.0]))
+ z_data = np.array([np.random.choice(K, 1, p=pi_true)[0] for n in range(N)])
+ print('pi={}'.format(pi_true))
+
+ # MODEL
+ pi = Dirichlet(alpha=tf.ones(4))
+ z = Categorical(p=tf.ones([N, 1]) * pi)
+
+ # INFERENCE
qpi = Dirichlet(alpha=tf.nn.softplus(tf.Variable(tf.random_normal([K]))))

- # Inference
- inference = ed.KLqp({pi: qpi}, data={zn: zn_data})
+ inference = ed.KLqp({pi: qpi}, data={z: z_data})
inference.run(n_iter=1500, n_samples=30)

sess = ed.get_session()
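A quick way to sanity-check the rewritten example, sketched here for illustration rather than taken from the file: average draws from the variational posterior qpi (whose softplus transform keeps the Dirichlet concentration positive) and compare them with the pi_true printed by the script.

# Sketch only; uses names defined in dirichlet_categorical.py above.
probs = sess.run(tf.reduce_mean(qpi.sample(1000), axis=0))
print('inferred pi={}'.format(probs))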
examples/invgamma_normal_mh.py (2 additions & 2 deletions)
@@ -22,8 +22,8 @@
print('sigma={}'.format(sigma))

# Prior definition
- alpha = tf.Variable(0.5, dtype=tf.float32, trainable=False)
- beta = tf.Variable(0.7, dtype=tf.float32, trainable=False)
+ alpha = tf.Variable(0.5, trainable=False)
+ beta = tf.Variable(0.7, trainable=False)

# Posterior inference
# Probabilistic model
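The dtype argument dropped here was redundant: TensorFlow infers float32 from a Python float initializer. A two-line illustration (not part of the file, and assuming the script's existing tensorflow import):

# Illustration only.
alpha = tf.Variable(0.5, trainable=False)
print(alpha.dtype)  # <dtype: 'float32_ref'> under TF 1.x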
examples/mixture_gaussian_mh.py (1 addition & 1 deletion)
@@ -29,7 +29,7 @@ def build_toy_dataset(N):
pi = np.array([0.4, 0.6])
mus = [[1, 1], [-1, -1]]
stds = [[0.1, 0.1], [0.1, 0.1]]
- x = np.zeros((N, 2), dtype=np.float32)
+ x = np.zeros((N, 2))
for n in range(N):
k = np.argmax(np.random.multinomial(1, pi))
x[n, :] = np.random.multivariate_normal(mus[k], np.diag(stds[k]))
examples/normal_normal.py (1 addition & 1 deletion)
@@ -13,7 +13,7 @@
ed.set_seed(42)

# DATA
- x_data = np.array([0.0] * 50, dtype=np.float32)
+ x_data = np.array([0.0] * 50)

# MODEL: Normal-Normal with known variance
mu = Normal(mu=0.0, sigma=1.0)
examples/normal_normal_hmc.py (1 addition & 1 deletion)
@@ -14,7 +14,7 @@
ed.set_seed(42)

# DATA
- x_data = np.array([0.0] * 50, dtype=np.float32)
+ x_data = np.array([0.0] * 50)

# MODEL: Normal-Normal with known variance
mu = Normal(mu=0.0, sigma=1.0)
examples/normal_normal_mh.py (1 addition & 1 deletion)
@@ -14,7 +14,7 @@
ed.set_seed(42)

# DATA
- x_data = np.array([0.0] * 50, dtype=np.float32)
+ x_data = np.array([0.0] * 50)

# MODEL: Normal-Normal with known variance
mu = Normal(mu=0.0, sigma=1.0)
examples/normal_normal_tensorboard.py (1 addition & 1 deletion)
@@ -13,7 +13,7 @@
ed.set_seed(42)

# DATA
- x_data = np.array([0.0] * 50, dtype=np.float32)
+ x_data = np.array([0.0] * 50)

# MODEL: Normal-Normal with known variance
mu = Normal(mu=0.0, sigma=1.0, name='mu')
examples/normal_sghmc.py (3 additions & 6 deletions)
@@ -7,15 +7,14 @@
from __future__ import print_function

import edward as ed
- import tensorflow as tf
import numpy as np
+ import tensorflow as tf

from matplotlib import pyplot as plt
from edward.models import Empirical, MultivariateNormalFull

plt.style.use("ggplot")

- # Plotting helper function.


def mvn_plot_contours(z, label=False, ax=None):
"""
@@ -32,7 +31,7 @@ def mvn_plot_contours(z, label=False, ax=None):
xs = np.linspace(xmin, xmax, num=100)
ys = np.linspace(ymin, ymax, num=100)
X, Y = np.meshgrid(xs, ys)
- T = tf.convert_to_tensor(np.c_[X.flatten(), Y.flatten()], dtype=tf.float32)
+ T = tf.cast(np.c_[X.flatten(), Y.flatten()], dtype=tf.float32)
Z = sess.run(tf.exp(z.log_prob(T))).reshape((len(xs), len(ys)))
if ax is None:
fig, ax = plt.subplots()
@@ -41,7 +40,6 @@ def mvn_plot_contours(z, label=False, ax=None):
plt.clabel(cs, inline=1, fontsize=10)


- # Example body.
ed.set_seed(42)

# MODEL
@@ -62,7 +60,6 @@ def mvn_plot_contours(z, label=False, ax=None):
print("Inferred posterior std:")
print(std)

- # VISUALIZATION
fig, ax = plt.subplots()
trace = sess.run(qz.params)
ax.scatter(trace[:, 0], trace[:, 1], marker=".")
examples/probabilistic_matrix_factorization.py (2 additions & 2 deletions)
@@ -30,8 +30,8 @@ def get_indicators(N, M, prob_std=0.5):
D = 3 # number of latent factors

# true latent factors
- U_true = np.random.randn(D, N).astype(np.float32)
- V_true = np.random.randn(D, M).astype(np.float32)
+ U_true = np.random.randn(D, N)
+ V_true = np.random.randn(D, M)

# DATA
R_true = build_toy_dataset(U_true, V_true, N, M)
(The remaining changed files are not shown in this view.)
