Update .ipynb files to what scripts/format_ipynb.py produces
After updating nbformat, running scripts/format_ipynb.py changed the
contents of all the IPython notebook files. Most frustratingly, one of
the changes is that the indentation of the .ipynb files changed, making
the diffs particularly noisy. Spot-inspecting files manually reveals a
number of real changes, some worthwhile (e.g., adding missing spaces in
argument lists) and some merely differences in formatting, such as
where line breaks are introduced. The latter set of changes is puzzling
because the style settings haven't changed. I tried but couldn't find a
way to avoid these changes short of outright changing the style
parameters, and that would make the TFQ .ipynb file formatting
non-standard, which seems worse. So, for lack of a better solution, I'm
checking in all the reformatted notebooks, in the hope that future
versions of yapf and nbformat won't keep introducing more .ipynb format
changes.
mhucka committed Dec 12, 2024
1 parent 02b047b commit 363022a
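
For context, below is a minimal sketch of the kind of read/format/write
round trip that a script like scripts/format_ipynb.py presumably
performs; the file path, the "google" style name, and the exact calls
are illustrative assumptions, not the script's actual contents. It
shows why two independent knobs are in play here: yapf decides where
line breaks fall inside each code cell, while nbformat's JSON writer
decides the .ipynb indentation.

    # Hypothetical sketch only -- not the real scripts/format_ipynb.py.
    import nbformat
    from yapf.yapflib.yapf_api import FormatCode

    path = "docs/tutorials/research_tools.ipynb"  # illustrative path
    nb = nbformat.read(path, as_version=4)

    for cell in nb.cells:
        if cell.cell_type == "code":
            # FormatCode returns (formatted_source, changed); its
            # line-break choices can shift between yapf releases even
            # with a fixed style.
            formatted, _ = FormatCode(cell.source + "\n",
                                      style_config="google")
            cell.source = formatted.rstrip("\n")

    # The indentation of the written .ipynb file is determined here by
    # nbformat's JSON writer, not by the yapf style settings.
    nbformat.write(nb, path)
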
Showing 9 changed files with 6,209 additions and 6,060 deletions.
1,049 changes: 525 additions & 524 deletions docs/tutorials/barren_plateaus.ipynb

1,653 changes: 827 additions & 826 deletions docs/tutorials/gradients.ipynb

2,672 changes: 1,338 additions & 1,334 deletions docs/tutorials/hello_many_worlds.ipynb

2,268 changes: 1,137 additions & 1,131 deletions docs/tutorials/mnist.ipynb

1,637 changes: 834 additions & 803 deletions docs/tutorials/noise.ipynb

2,421 changes: 1,211 additions & 1,210 deletions docs/tutorials/qcnn.ipynb

235 changes: 130 additions & 105 deletions docs/tutorials/quantum_data.ipynb

239 changes: 148 additions & 91 deletions docs/tutorials/quantum_reinforcement_learning.ipynb

95 changes: 59 additions & 36 deletions docs/tutorials/research_tools.ipynb
@@ -86,22 +86,23 @@
"!pip install tensorflow==2.15.0 tensorflow-quantum==0.7.3 tensorboard_plugin_profile==2.15.0"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "4Ql5PW-ACO0J"
},
"outputs": [],
"source": [
"# Update package resources to account for version changes.\n",
"import importlib, pkg_resources\n",
"importlib.reload(pkg_resources)"
]
},
{
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "4Ql5PW-ACO0J"
},
"outputs": [],
"source": [
"# Update package resources to account for version changes.\n",
"import importlib, pkg_resources\n",
"\n",
"importlib.reload(pkg_resources)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
@@ -159,9 +160,11 @@
" qubits, depth=2)\n",
" return random_circuit\n",
"\n",
"\n",
"def generate_data(circuit, n_samples):\n",
" \"\"\"Draw n_samples samples from circuit into a tf.Tensor.\"\"\"\n",
" return tf.squeeze(tfq.layers.Sample()(circuit, repetitions=n_samples).to_tensor())"
" return tf.squeeze(tfq.layers.Sample()(circuit,\n",
" repetitions=n_samples).to_tensor())"
]
},
{
@@ -270,16 +273,20 @@
" \"\"\"Convert tensor of bitstrings to tensor of ints.\"\"\"\n",
" sigs = tf.constant([1 << i for i in range(N_QUBITS)], dtype=tf.int32)\n",
" rounded_bits = tf.clip_by_value(tf.math.round(\n",
" tf.cast(bits, dtype=tf.dtypes.float32)), clip_value_min=0, clip_value_max=1)\n",
" return tf.einsum('jk,k->j', tf.cast(rounded_bits, dtype=tf.dtypes.int32), sigs)\n",
" tf.cast(bits, dtype=tf.dtypes.float32)),\n",
" clip_value_min=0,\n",
" clip_value_max=1)\n",
" return tf.einsum('jk,k->j', tf.cast(rounded_bits, dtype=tf.dtypes.int32),\n",
" sigs)\n",
"\n",
"\n",
"@tf.function\n",
"def xeb_fid(bits):\n",
" \"\"\"Compute linear XEB fidelity of bitstrings.\"\"\"\n",
" final_probs = tf.squeeze(\n",
" tf.abs(tfq.layers.State()(REFERENCE_CIRCUIT).to_tensor()) ** 2)\n",
" tf.abs(tfq.layers.State()(REFERENCE_CIRCUIT).to_tensor())**2)\n",
" nums = bits_to_ints(bits)\n",
" return (2 ** N_QUBITS) * tf.reduce_mean(tf.gather(final_probs, nums)) - 1.0"
" return (2**N_QUBITS) * tf.reduce_mean(tf.gather(final_probs, nums)) - 1.0"
]
},
{
@@ -334,6 +341,8 @@
"outputs": [],
"source": [
"LATENT_DIM = 100\n",
"\n",
"\n",
"def make_generator_model():\n",
" \"\"\"Construct generator model.\"\"\"\n",
" model = tf.keras.Sequential()\n",
@@ -345,6 +354,7 @@
"\n",
" return model\n",
"\n",
"\n",
"def make_discriminator_model():\n",
" \"\"\"Constrcut discriminator model.\"\"\"\n",
" model = tf.keras.Sequential()\n",
@@ -387,17 +397,21 @@
"outputs": [],
"source": [
"cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n",
"\n",
"\n",
"def discriminator_loss(real_output, fake_output):\n",
" \"\"\"Compute discriminator loss.\"\"\"\n",
" real_loss = cross_entropy(tf.ones_like(real_output), real_output)\n",
" fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)\n",
" total_loss = real_loss + fake_loss\n",
" return total_loss\n",
"\n",
"\n",
"def generator_loss(fake_output):\n",
" \"\"\"Compute generator loss.\"\"\"\n",
" return cross_entropy(tf.ones_like(fake_output), fake_output)\n",
"\n",
"\n",
"generator_optimizer = tf.keras.optimizers.Adam(1e-4)\n",
"discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)"
]
@@ -410,7 +424,8 @@
},
"outputs": [],
"source": [
"BATCH_SIZE=256\n",
"BATCH_SIZE = 256\n",
"\n",
"\n",
"@tf.function\n",
"def train_step(images):\n",
@@ -425,8 +440,8 @@
" gen_loss = generator_loss(fake_output)\n",
" disc_loss = discriminator_loss(real_output, fake_output)\n",
"\n",
" gradients_of_generator = gen_tape.gradient(\n",
" gen_loss, generator.trainable_variables)\n",
" gradients_of_generator = gen_tape.gradient(gen_loss,\n",
" generator.trainable_variables)\n",
" gradients_of_discriminator = disc_tape.gradient(\n",
" disc_loss, discriminator.trainable_variables)\n",
"\n",
@@ -480,29 +495,37 @@
"def train(dataset, epochs, start_epoch=1):\n",
" \"\"\"Launch full training run for the given number of epochs.\"\"\"\n",
" # Log original training distribution.\n",
" tf.summary.histogram('Training Distribution', data=bits_to_ints(dataset), step=0)\n",
" tf.summary.histogram('Training Distribution',\n",
" data=bits_to_ints(dataset),\n",
" step=0)\n",
"\n",
" batched_data = tf.data.Dataset.from_tensor_slices(dataset).shuffle(N_SAMPLES).batch(512)\n",
" batched_data = tf.data.Dataset.from_tensor_slices(dataset).shuffle(\n",
" N_SAMPLES).batch(512)\n",
" t = time.time()\n",
" for epoch in range(start_epoch, start_epoch + epochs):\n",
" for i, image_batch in enumerate(batched_data):\n",
" # Log batch-wise loss.\n",
" gl, dl = train_step(image_batch)\n",
" tf.summary.scalar(\n",
" 'Generator loss', data=gl, step=epoch * len(batched_data) + i)\n",
" tf.summary.scalar(\n",
" 'Discriminator loss', data=dl, step=epoch * len(batched_data) + i)\n",
" tf.summary.scalar('Generator loss',\n",
" data=gl,\n",
" step=epoch * len(batched_data) + i)\n",
" tf.summary.scalar('Discriminator loss',\n",
" data=dl,\n",
" step=epoch * len(batched_data) + i)\n",
"\n",
" # Log full dataset XEB Fidelity and generated distribution.\n",
" generated_samples = generator(tf.random.normal([N_SAMPLES, 100]))\n",
" tf.summary.scalar(\n",
" 'Generator XEB Fidelity Estimate', data=xeb_fid(generated_samples), step=epoch)\n",
" tf.summary.histogram(\n",
" 'Generator distribution', data=bits_to_ints(generated_samples), step=epoch)\n",
" tf.summary.scalar('Generator XEB Fidelity Estimate',\n",
" data=xeb_fid(generated_samples),\n",
" step=epoch)\n",
" tf.summary.histogram('Generator distribution',\n",
" data=bits_to_ints(generated_samples),\n",
" step=epoch)\n",
" # Log new samples drawn from this particular random circuit.\n",
" random_new_distribution = generate_data(REFERENCE_CIRCUIT, N_SAMPLES)\n",
" tf.summary.histogram(\n",
" 'New round of True samples', data=bits_to_ints(random_new_distribution), step=epoch)\n",
" tf.summary.histogram('New round of True samples',\n",
" data=bits_to_ints(random_new_distribution),\n",
" step=epoch)\n",
"\n",
" if epoch % 10 == 0:\n",
" print('Epoch {}, took {}(s)'.format(epoch, time.time() - t))\n",
