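"""gate.py: Graph Attention Auto-Encoder (GATE) model.

Builds a symmetric graph auto-encoder in TensorFlow 1.x graph mode (or
`tensorflow.compat.v1` with eager execution disabled): an encoder of stacked
graph-attention layers, a decoder that mirrors it with tied (transposed)
weights and the cached attention matrices, and a loss that combines feature
reconstruction with an edge-level structure term.
"""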
import tensorflow as tf


class GATE:
    def __init__(self, hidden_dims, lambda_):
        # lambda_ weights the structure-reconstruction loss against the
        # feature-reconstruction loss; hidden_dims is [input_dim, d_1, ..., d_L].
        self.lambda_ = lambda_
        self.n_layers = len(hidden_dims) - 1
        self.W, self.v = self.define_weights(hidden_dims)
        # C caches the per-layer attention matrices so the decoder can reuse them.
        self.C = {}

    def __call__(self, A, X, R, S):
        # Encoder: stack of graph-attention layers.
        H = X
        for layer in range(self.n_layers):
            H = self.__encoder(A, H, layer)
        # Final node representations.
        self.H = H
        # Decoder: mirrors the encoder with tied (transposed) weights.
        for layer in range(self.n_layers - 1, -1, -1):
            H = self.__decoder(H, layer)
        X_ = H
        # Reconstruction loss of the node features (Frobenius norm of X - X_).
        features_loss = tf.sqrt(tf.reduce_sum(tf.pow(X - X_, 2)))
        # Reconstruction loss of the graph structure: push the representations
        # of the endpoints of each edge (S, R) toward a high inner product.
        self.S_emb = tf.nn.embedding_lookup(self.H, S)
        self.R_emb = tf.nn.embedding_lookup(self.H, R)
        structure_loss = -tf.log(tf.sigmoid(tf.reduce_sum(self.S_emb * self.R_emb, axis=-1)))
        structure_loss = tf.reduce_sum(structure_loss)
        # Total loss.
        self.loss = features_loss + self.lambda_ * structure_loss
        return self.loss, self.H, self.C

    def __encoder(self, A, H, layer):
        # Linear transform, then aggregate neighbours with learned attention.
        H = tf.matmul(H, self.W[layer])
        self.C[layer] = self.graph_attention_layer(A, H, self.v[layer], layer)
        return tf.sparse_tensor_dense_matmul(self.C[layer], H)

    def __decoder(self, H, layer):
        # Reverse the encoder layer, reusing its weights (transposed) and its
        # cached attention matrix.
        H = tf.matmul(H, self.W[layer], transpose_b=True)
        return tf.sparse_tensor_dense_matmul(self.C[layer], H)

    def define_weights(self, hidden_dims):
        # One weight matrix per layer, shared between encoder and decoder.
        W = {}
        for i in range(self.n_layers):
            W[i] = tf.get_variable("W%s" % i, shape=(hidden_dims[i], hidden_dims[i + 1]))
        # Two attention vectors per layer: one scoring the source node of an
        # edge, one scoring the neighbour node.
        Ws_att = {}
        for i in range(self.n_layers):
            v = {}
            v[0] = tf.get_variable("v%s_0" % i, shape=(hidden_dims[i + 1], 1))
            v[1] = tf.get_variable("v%s_1" % i, shape=(hidden_dims[i + 1], 1))
            Ws_att[i] = v
        return W, Ws_att
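
    # Attention coefficients for one layer: every edge (i, j) present in A
    # gets the unnormalized score sigmoid(v0 . M_i + v1 . M_j), and a sparse
    # row-wise softmax over each node's neighbourhood yields the final weights.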
    def graph_attention_layer(self, A, M, v, layer):
        with tf.variable_scope("layer_%s" % layer):
            # A * f1 attaches a score to every edge's source node, and
            # A * f2^T to its neighbour node; both stay sparse on A's pattern.
            f1 = tf.matmul(M, v[0])
            f1 = A * f1
            f2 = tf.matmul(M, v[1])
            f2 = A * tf.transpose(f2, [1, 0])
            logits = tf.sparse_add(f1, f2)
            unnormalized_attentions = tf.SparseTensor(
                indices=logits.indices,
                values=tf.nn.sigmoid(logits.values),
                dense_shape=logits.dense_shape)
            # Row-wise sparse softmax turns the scores into attention weights.
            attentions = tf.sparse_softmax(unnormalized_attentions)
            return attentions
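

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original model code): one plausible
# way to wire GATE into a TF 1.x training loop. The toy ring graph, the
# hidden_dims, lambda_, learning rate, and placeholder names below are all
# illustrative assumptions, not values from the original repository.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np

    n_nodes, n_features = 4, 8
    hidden_dims = [n_features, 16, 4]  # input width followed by layer widths

    # Placeholders: dense features, sparse adjacency (the attention mask),
    # and the edge endpoints (S, R) used by the structure loss.
    X_in = tf.placeholder(tf.float32, shape=(n_nodes, n_features))
    A_in = tf.sparse_placeholder(tf.float32, shape=(n_nodes, n_nodes))
    S_in = tf.placeholder(tf.int64, shape=(None,))
    R_in = tf.placeholder(tf.int64, shape=(None,))

    model = GATE(hidden_dims, lambda_=1.0)
    loss, H, C = model(A_in, X_in, R_in, S_in)
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

    # A directed ring with self-loops; indices sorted row-major, since the
    # sparse ops above expect canonical ordering.
    edges = np.array([[i, (i + 1) % n_nodes] for i in range(n_nodes)]
                     + [[i, i] for i in range(n_nodes)], dtype=np.int64)
    edges = edges[np.lexsort((edges[:, 1], edges[:, 0]))]
    feed = {
        X_in: np.random.randn(n_nodes, n_features).astype(np.float32),
        A_in: tf.SparseTensorValue(edges, np.ones(len(edges), np.float32),
                                   (n_nodes, n_nodes)),
        S_in: edges[:, 0],
        R_in: edges[:, 1],
    }

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(5):
            _, loss_val = sess.run([train_op, loss], feed)
            print("step %d  loss %.4f" % (step, loss_val))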