
Commit

Update tutorial_learning2learn.py (#923)
A TensorFlow issue appeared in the learning2learn demo.

The problem has not been solved yet, but we have decided to revert the demo to non-executable so as not to block the repository.
KetpuntoG authored Sep 14, 2023
1 parent 0cb3257 commit 8b8c75c
Showing 2 changed files with 218 additions and 1 deletion.
@@ -19,7 +19,7 @@
],
"seoDescription": "Use a classical recurrent neural network to initilize the parameters of a variational quatum algorithm.",
"doi": "",
"canonicalURL": "/qml/demos/tutorial_learning2learn",
"canonicalURL": "/qml/demos/learning2learn",
"references": [],
"basedOnPapers": [],
"referencedByPapers": [],
@@ -119,6 +119,9 @@
Check out this great tutorial on
how to use QAOA for solving graph problems: https://pennylane.ai/qml/demos/tutorial_qaoa_intro.html
.. note::
    Running the tutorial (excluding the Appendix) takes approximately 13 minutes.
"""

######################################################################
@@ -191,6 +194,12 @@ def generate_graphs(n_graphs, n_nodes, p_edge):

nx.draw(graphs[0])

######################################################################
# .. figure:: ../demonstrations/learning2learn/rendered_Graph0.png
# :align: center
# :width: 70%
# :target: javascript:void(0);
#
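
######################################################################
# The body of ``generate_graphs`` is not shown above. A minimal sketch of
# such a helper, assuming Erdos-Renyi random graphs drawn with
# ``networkx`` (the call below is illustrative, not necessarily the
# demo's exact settings):

import networkx as nx


def generate_graphs(n_graphs, n_nodes, p_edge):
    """Generate a list of random graphs G(n_nodes, p_edge)."""
    return [nx.gnp_random_graph(n_nodes, p_edge) for _ in range(n_graphs)]


graphs = generate_graphs(20, 7, 3.0 / 7.0)  # e.g. 20 small random graphs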

######################################################################
# Variational Quantum Circuit: QAOA
@@ -253,6 +262,15 @@ def hamiltonian(params, **kwargs):
# Evaluate the QAOA instance just created with some angles.
print(cost(x))

##############################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# tf.Tensor(-3.193267957255582, shape=(), dtype=float64)
#
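
######################################################################
# The full construction of ``cost`` is not reproduced above. As an
# illustrative reconstruction (not necessarily the demo's exact code),
# a MaxCut QAOA cost function with a TensorFlow interface could be
# assembled in PennyLane roughly as follows:

import pennylane as qml
import tensorflow as tf
from pennylane import qaoa


def qaoa_from_graph(graph, n_layers=1):
    """Return a TensorFlow-differentiable QAOA MaxCut cost function for ``graph``."""
    wires = len(graph.nodes)
    cost_h, mixer_h = qaoa.maxcut(graph)

    def qaoa_layer(gamma, alpha):
        qaoa.cost_layer(gamma, cost_h)
        qaoa.mixer_layer(alpha, mixer_h)

    dev = qml.device("default.qubit", wires=wires)

    @qml.qnode(dev, interface="tf")
    def cost(params):
        # params has shape (2, n_layers): gammas in the first row, alphas in the second
        for w in range(wires):
            qml.Hadamard(wires=w)
        qml.layer(qaoa_layer, n_layers, params[0], params[1])
        return qml.expval(cost_h)

    return cost


cost = qaoa_from_graph(graphs[0])
x = tf.Variable([[0.5], [0.5]], dtype=tf.float64)  # illustrative initial angles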


######################################################################
@@ -458,6 +476,46 @@ def train_step(graph_cost):
print(f" > Graph {i+1}/{len(graph_cost_list)} - Loss: {loss[0][0]}")
print(f" >> Mean Loss during epoch: {np.mean(total_loss)}")

##############################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# Epoch 1
# > Graph 1/20 - Loss: -1.6641689538955688
# > Graph 6/20 - Loss: -1.4186843633651733
# > Graph 11/20 - Loss: -1.3757232427597046
# > Graph 16/20 - Loss: -1.294339656829834
# >> Mean Loss during epoch: -1.7352586269378663
# Epoch 2
# > Graph 1/20 - Loss: -2.119091749191284
# > Graph 6/20 - Loss: -1.4789190292358398
# > Graph 11/20 - Loss: -1.3779840469360352
# > Graph 16/20 - Loss: -1.2963457107543945
# >> Mean Loss during epoch: -1.8252217948436738
# Epoch 3
# > Graph 1/20 - Loss: -2.1322619915008545
# > Graph 6/20 - Loss: -1.459418535232544
# > Graph 11/20 - Loss: -1.390620470046997
# > Graph 16/20 - Loss: -1.3165746927261353
# >> Mean Loss during epoch: -1.8328069806098939
# Epoch 4
# > Graph 1/20 - Loss: -2.1432175636291504
# > Graph 6/20 - Loss: -1.476362943649292
# > Graph 11/20 - Loss: -1.3938289880752563
# > Graph 16/20 - Loss: -1.3140206336975098
# >> Mean Loss during epoch: -1.8369774043560028
# Epoch 5
# > Graph 1/20 - Loss: -2.1429405212402344
# > Graph 6/20 - Loss: -1.477513074874878
# > Graph 11/20 - Loss: -1.3909202814102173
# > Graph 16/20 - Loss: -1.315887689590454
# >> Mean Loss during epoch: -1.8371947884559632
#


######################################################################
# As you can see, the Loss for each graph keeps decreasing across epochs,
# indicating that the training routine is working correctly.
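
######################################################################
# For reference, a sketch of what the meta-learning machinery behind
# ``train_step`` can look like: an ``LSTMCell`` receives the previously
# observed cost and proposes new QAOA angles, the recurrence is unrolled
# for a few steps, and the accumulated cost is used to update the LSTM
# weights. This is an assumed reconstruction for illustration (the
# optimizer, learning rate and number of unrolled steps are illustrative
# choices), not the demo's verbatim code.

n_layers = 1
cell = tf.keras.layers.LSTMCell(2 * n_layers)  # proposes 2*n_layers angles per step
meta_opt = tf.keras.optimizers.Adam(learning_rate=0.1)


def recurrent_loop(graph_cost, n_steps=5):
    """Unroll the LSTM on one graph's cost and sum the intermediate costs."""
    cost_in = tf.zeros(shape=(1, 1))  # "cost" fed to the cell at step 0
    h, c = tf.zeros(shape=(1, 2 * n_layers)), tf.zeros(shape=(1, 2 * n_layers))
    loss = tf.zeros(shape=(1, 1))
    for _ in range(n_steps):
        params, [h, c] = cell(cost_in, states=[h, c])
        angles = tf.reshape(tf.cast(params, tf.float64), (2, n_layers))
        cost_in = tf.reshape(tf.cast(graph_cost(angles), tf.float32), (1, 1))
        loss = loss + cost_in  # every intermediate cost contributes to the loss
    return loss


def train_step(graph_cost):
    """One meta-training step of the LSTM on a single graph."""
    with tf.GradientTape() as tape:
        loss = recurrent_loop(graph_cost)
    grads = tape.gradient(loss, cell.trainable_weights)
    meta_opt.apply_gradients(zip(grads, cell.trainable_weights))
    return loss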
@@ -479,6 +537,14 @@ def train_step(graph_cost):

nx.draw(new_graph)

######################################################################
# .. figure:: ../demonstrations/learning2learn/rendered_Graph1.png
# :align: center
# :width: 70%
# :target: javascript:void(0);
#


######################################################################
# Then we apply the trained RNN to this new graph, saving intermediate
# results coming from all the recurrent iterations in the network.
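
######################################################################
# Concretely, this amounts to rerunning the trained LSTM cell on the new
# graph's cost function while recording every proposed set of angles and
# the cost it achieves. A sketch of that step, reusing the hypothetical
# ``qaoa_from_graph``, ``cell`` and ``n_layers`` from the sketches above:

new_cost = qaoa_from_graph(new_graph)  # QAOA cost for the unseen graph

guesses, lstm_losses = [], []
cost_in = tf.zeros(shape=(1, 1))
h, c = tf.zeros(shape=(1, 2 * n_layers)), tf.zeros(shape=(1, 2 * n_layers))
for _ in range(10):
    params, [h, c] = cell(cost_in, states=[h, c])
    angles = tf.reshape(tf.cast(params, tf.float64), (2, n_layers))
    guesses.append(angles)
    lstm_losses.append(float(new_cost(angles)))  # cost after each LSTM proposal
    cost_in = tf.reshape(tf.cast(lstm_losses[-1], tf.float32), (1, 1))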
@@ -523,6 +589,11 @@ def train_step(graph_cost):
plt.show()

######################################################################
# .. figure:: ../demonstrations/learning2learn/rendered_LossLSTM.png
# :align: center
# :width: 70%
# :target: javascript:void(0);
#
# That’s remarkable! The RNN learned to propose new parameters such that
# the MaxCut cost is minimized very rapidly: in just a few iterations the
# loss reaches a minimum. Actually, it takes just a single step for the LSTM
@@ -569,6 +640,30 @@ def train_step(graph_cost):
print(f"Final cost function: {new_cost(x).numpy()}\nOptimized angles: {x.numpy()}")

##############################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# Step 1 - Loss = -4.1700805
# Step 2 - Loss = -4.67503588
# Step 3 - Loss = -5.09949909
# Step 4 - Loss = -5.40388533
# Step 5 - Loss = -5.59529203
# Step 6 - Loss = -5.70495197
# Step 7 - Loss = -5.7642561
# Step 8 - Loss = -5.79533198
# Step 9 - Loss = -5.81138752
# Step 10 - Loss = -5.81966529
# Step 11 - Loss = -5.82396722
# Step 12 - Loss = -5.82624537
# Step 13 - Loss = -5.82749126
# Step 14 - Loss = -5.82820626
# Step 15 - Loss = -5.82864379
# Final cost function: -5.828932361904984
# Optimized angles: [[ 0.5865477 ]
# [-0.3228858]]
#
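
######################################################################
# The step-by-step losses above come from the standard-optimizer
# baseline. A sketch of such a comparison, assuming plain gradient
# descent on the same ``new_cost``, started from a fixed initial guess
# (the learning rate and step count below are illustrative):

opt_sgd = tf.keras.optimizers.SGD(learning_rate=0.01)
x = tf.Variable([[0.5], [0.5]], dtype=tf.float64)  # fixed starting angles

sgd_losses = []
for step in range(15):
    with tf.GradientTape() as tape:
        loss = new_cost(x)
    grads = tape.gradient(loss, [x])
    opt_sgd.apply_gradients(zip(grads, [x]))
    sgd_losses.append(float(loss))
    print(f"Step {step + 1} - Loss = {loss.numpy()}")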

fig, ax = plt.subplots()
@@ -584,6 +679,13 @@ def train_step(graph_cost):
ax.set_xticks([0, 5, 10, 15, 20]);
plt.show()

######################################################################
# .. figure:: ../demonstrations/learning2learn/rendered_LossConfrontation.png
# :align: center
# :width: 70%
# :target: javascript:void(0);
#


######################################################################
# *Hurray!* 🎉🎉
@@ -750,6 +852,30 @@ def train_step(graph_cost):
print(f" > Graph {i+1}/{len(gs_cost_list)} - Loss: {loss}")
print(f" >> Mean Loss during epoch: {np.mean(total_loss)}")

##############################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# Epoch 1
# > Graph 1/15 - Loss: [[-1.4876363]]
# > Graph 6/15 - Loss: [[-1.8590403]]
# > Graph 11/15 - Loss: [[-1.7644017]]
# >> Mean Loss during epoch: -1.9704322338104248
# Epoch 2
# > Graph 1/15 - Loss: [[-1.8650053]]
# > Graph 6/15 - Loss: [[-1.9578737]]
# > Graph 11/15 - Loss: [[-1.8377447]]
# >> Mean Loss during epoch: -2.092947308222453
# Epoch 3
# > Graph 1/15 - Loss: [[-1.9009062]]
# > Graph 6/15 - Loss: [[-1.9726204]]
# > Graph 11/15 - Loss: [[-1.8668792]]
# >> Mean Loss during epoch: -2.1162660201390584
#


######################################################################
# Let’s check if this hybrid model eventually learned a good heuristic to
@@ -766,6 +892,14 @@ def train_step(graph_cost):

nx.draw(new_graph)

######################################################################
# .. figure:: ../demonstrations/learning2learn/rendered_Graph10.png
# :align: center
# :width: 70%
# :target: javascript:void(0);
#


######################################################################
# We call the trained recurrent LSTM on this graph, saving not only the
# last, but all intermediate guesses for the parameters.
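
######################################################################
# As before, this only reuses the trained ``cell`` (with its weights left
# untouched) on the larger graph's cost; a compact sketch with the same
# hypothetical helpers as above:

new_cost = qaoa_from_graph(new_graph)  # graph with more nodes than in training
guesses = []
cost_in = tf.zeros(shape=(1, 1))
h, c = tf.zeros(shape=(1, 2 * n_layers)), tf.zeros(shape=(1, 2 * n_layers))
for _ in range(10):
    params, [h, c] = cell(cost_in, states=[h, c])  # no retraining step here
    guesses.append(tf.reshape(tf.cast(params, tf.float64), (2, n_layers)))
    cost_in = tf.reshape(tf.cast(new_cost(guesses[-1]), tf.float32), (1, 1))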
@@ -799,6 +933,14 @@ def train_step(graph_cost):
ax.set_xticks([0, 5, 10, 15, 20]);
plt.show()

######################################################################
# .. figure:: ../demonstrations/learning2learn/rendered_LossGeneralization.png
# :align: center
# :width: 70%
# :target: javascript:void(0);
#


######################################################################
# Again, we can confirm that the custom optimizer based on the LSTM quickly reaches a good
# value of the loss function, and also achieves good generalization performance, since
@@ -846,6 +988,13 @@ def train_step(graph_cost):
plt.show()

######################################################################
# .. figure:: ../demonstrations/learning2learn/rendered_LossLandscape.png
# :align: center
# :width: 70%
# :target: javascript:void(0);
#
#
#
#
# Ideas for creating a Keras Layer and Keras Model
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -916,6 +1065,54 @@ def call(self, inputs):

model.summary()


##############################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# Model: "functional_1"
# __________________________________________________________________________________________________
# Layer (type) Output Shape Param # Connected to
# ==================================================================================================
# input_1 (InputLayer) [(None, 1)] 0
# __________________________________________________________________________________________________
# input_2 (InputLayer) [(None, 2)] 0
# __________________________________________________________________________________________________
# input_3 (InputLayer) [(None, 2)] 0
# __________________________________________________________________________________________________
# input_4 (InputLayer) [(None, 2)] 0
# __________________________________________________________________________________________________
# qrnn (QRNN) [(1, 1), 48 input_1[0][0]
# (None, 2), input_2[0][0]
# (None, 2), input_3[0][0]
# (None, 2)] input_4[0][0]
# qrnn[0][0]
# qrnn[0][1]
# qrnn[0][2]
# qrnn[0][3]
# qrnn[1][0]
# qrnn[1][1]
# qrnn[1][2]
# qrnn[1][3]
# __________________________________________________________________________________________________
# tf.math.multiply (TFOpLambda) (1, 1) 0 qrnn[0][0]
# __________________________________________________________________________________________________
# tf.math.multiply_1 (TFOpLambda) (1, 1) 0 qrnn[1][0]
# __________________________________________________________________________________________________
# tf.math.multiply_2 (TFOpLambda) (1, 1) 0 qrnn[2][0]
# __________________________________________________________________________________________________
# average_147 (Average) (1, 1) 0 tf.math.multiply[0][0]
# tf.math.multiply_1[0][0]
# tf.math.multiply_2[0][0]
# ==================================================================================================
# Total params: 48
# Trainable params: 48
# Non-trainable params: 0
#
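
######################################################################
# A minimal sketch of the idea behind such a custom layer: subclass
# ``tf.keras.layers.Layer``, keep an ``LSTMCell`` inside, and let ``call``
# map the previously observed cost, parameters and LSTM states to a new
# parameter guess and its cost. The class below is an illustrative
# reconstruction of that idea, not the exact layer summarized above.


class QRNN(tf.keras.layers.Layer):
    def __init__(self, graph_cost, n_layers=1, **kwargs):
        super().__init__(**kwargs)
        self.graph_cost = graph_cost  # TF-differentiable QAOA cost function
        self.n_layers = n_layers
        self.qaoa_cell = tf.keras.layers.LSTMCell(2 * n_layers)

    def call(self, inputs):
        prev_cost, prev_params, h, c = inputs
        # Feed the previous cost and parameters to the LSTM cell to get new angles.
        new_params, [new_h, new_c] = self.qaoa_cell(
            tf.concat([prev_cost, prev_params], axis=-1), states=[h, c]
        )
        angles = tf.reshape(tf.cast(new_params, tf.float64), (2, self.n_layers))
        new_cost = tf.reshape(tf.cast(self.graph_cost(angles), tf.float32), (1, 1))
        return [new_cost, new_params, new_h, new_c]


# Example usage on eager tensors (shapes are illustrative):
qrnn = QRNN(graph_cost=new_cost)
out = qrnn([tf.zeros((1, 1)), tf.zeros((1, 2)), tf.zeros((1, 2)), tf.zeros((1, 2))])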

######################################################################
# A basic training routine for the ``Keras Model`` just created:
#
@@ -948,6 +1145,26 @@ def call(self, inputs):
for t, s in zip(pred, ["out0", "out1", "out2", "Loss"]):
print(f" >{s}: {t.numpy()}")

##############################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# Step 1 - Loss = [[-1.5563084]] - Cost = -4.762684301954701
# Step 2 - Loss = [[-1.5649065]] - Cost = -4.799981173473755
# Step 3 - Loss = [[-1.5741502]] - Cost = -4.840036354736862
# Step 4 - Loss = [[-1.5841404]] - Cost = -4.883246647056216
# Step 5 - Loss = [[-1.5948243]] - Cost = -4.929228976649736
# Final Loss: [[-1.5948243]]
# Final Outs:
# >out0: [[-0.01041588 0.01016874]]
# >out1: [[-0.04530389 0.38148248]]
# >out2: [[-0.10258182 0.4134117 ]]
# >Loss: [[-1.5948243]]
#
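
######################################################################
# The loop that produced the output above is only partially shown. A
# sketch of such a training routine, assuming ``model`` is the functional
# ``Keras Model`` built above and ``inputs`` is the list of its initial
# input tensors (the optimizer and learning rate are illustrative):

opt_model = tf.keras.optimizers.Adam(learning_rate=0.1)

for step in range(5):
    with tf.GradientTape() as tape:
        pred = model(inputs)
        loss = pred[3]  # the model's last output is the accumulated loss
    grads = tape.gradient(loss, model.trainable_weights)
    opt_model.apply_gradients(zip(grads, model.trainable_weights))
    print(f"Step {step + 1} - Loss = {loss.numpy()}")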

######################################################################
# .. note::
# This code works only for a single graph at a time, since a graph was
