From 8acba9b16383590caaec7710f0515ae2d9807bbf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20P=2E=20Moutinho?= <56390829+jpmoutinho@users.noreply.github.com>
Date: Wed, 18 Oct 2023 12:29:28 +0200
Subject: [PATCH] [Docs] Feature maps and QML reorganization (#92)

---
 docs/development/draw.md        |   2 +-
 docs/qml/index.md               |  46 +++--
 docs/qml/ml_tools.md            | 213 +++++++++++++++++++++
 docs/qml/qaoa.md                |   2 +-
 docs/qml/qcl.md                 |   2 +-
 docs/qml/qml_constructors.md    | 191 +++++++++++++++++++
 docs/qml/qml_tools.md           | 324 --------------------------------
 docs/tutorials/parameters.md    |   2 +-
 docs/tutorials/quantummodels.md |   2 +-
 mkdocs.yml                      |   3 +-
 10 files changed, 438 insertions(+), 349 deletions(-)
 create mode 100644 docs/qml/ml_tools.md
 create mode 100644 docs/qml/qml_constructors.md
 delete mode 100644 docs/qml/qml_tools.md

diff --git a/docs/development/draw.md b/docs/development/draw.md
index 803101bf..032007e9 100644
--- a/docs/development/draw.md
+++ b/docs/development/draw.md
@@ -58,7 +58,7 @@ print(html_string(block)) # markdown-exec: hide
 ```python exec="on" source="material-block" html="1"
 from qadence import feature_map, hea, chain
-block = chain(feature_map(4, fm_type="tower"), hea(4,2))
+block = chain(feature_map(4, reupload_scaling="Tower"), hea(4,2))
 from qadence.draw import html_string # markdown-exec: hide
 print(html_string(block)) # markdown-exec: hide
 ```
diff --git a/docs/qml/index.md b/docs/qml/index.md
index d235ef70..0945a80d 100644
--- a/docs/qml/index.md
+++ b/docs/qml/index.md
@@ -1,15 +1,20 @@
-Variational algorithms on noisy devices and quantum machine learning (QML) [^1] in particular are
-the target applications for Qadence. For this purpose, the
-library offers both flexible symbolic expressions for the
-quantum circuit parameters via `sympy` (see [here](../tutorials/parameters.md) for more
-details) and native automatic differentiation via integration with
-[PyTorch](https://pytorch.org/) deep learning framework.
+Variational algorithms on noisy devices and quantum machine learning (QML)[^1] in particular are among the main
+target applications for Qadence. For this purpose, the library offers both flexible symbolic expressions for the
+quantum circuit parameters via `sympy` (see [here](../tutorials/parameters.md) for more details) and native automatic
+differentiation via integration with the [PyTorch](https://pytorch.org/) deep learning framework.
+
+Furthermore, Qadence offers a wide range of utilities to help build and research quantum machine learning algorithms, including:
+
+* [a set of constructors](qml_constructors.md) for circuits commonly used in quantum machine learning such as feature maps and ansatze
+* [a set of tools](ml_tools.md) for training and optimizing quantum neural networks and loading classical data into a QML algorithm
+
+## Some simple examples

 The Qadence symbolic parameter interface allows creating arbitrary feature maps
 to encode classical data into quantum circuits with an arbitrary non-linear
 function embedding for the input values:

-```python exec="on" source="material-block" html="1" result="json" session="qml"
+```python exec="on" source="material-block" result="json" session="qml"
 import qadence as qd
 from qadence.operations import *
 import torch
@@ -17,52 +22,55 @@ from sympy import acos

 n_qubits = 4

+# Example feature map, also directly available with the `feature_map` function
 fp = qd.FeatureParameter("phi")
-feature_map = qd.kron(RX(i, 2 * acos(fp)) for i in range(n_qubits))
+fm = qd.kron(RX(i, acos(fp)) for i in range(n_qubits))

 # the key in the dictionary must correspond to
 # the name assigned to the feature parameter
 inputs = {"phi": torch.rand(3)}
-samples = qd.sample(feature_map, values=inputs)
-print(samples)
+samples = qd.sample(fm, values=inputs)
+print(f"samples = {samples[0]}") # markdown-exec: hide
 ```

 The [`constructors.feature_map`][qadence.constructors.feature_map] module provides
 convenience functions to build commonly used feature maps where the input parameter
-is encoded in the single-qubit gates rotation angle.
+is encoded in the single-qubit gates' rotation angles. These functions are further
+demonstrated in the [QML constructors tutorial](qml_constructors.md).

 Furthermore, Qadence is natively integrated with the PyTorch automatic differentiation engine, thus
 Qadence quantum models can be used seamlessly in a PyTorch workflow.

 Let's create a quantum neural network model using the feature map just defined, a
-digital-analog variational ansatz and a simple observable $X(0) \otimes X(1)$. We
-use the convenience `QNN` quantum model abstraction.
+digital-analog variational ansatz ([also explained here](qml_constructors.md)) and a
+simple observable $X(0) \otimes X(1)$. We use the convenience `QNN` quantum model abstraction.

 ```python exec="on" source="material-block" result="json" session="qml"
 ansatz = qd.hea(n_qubits, strategy="sDAQC")
-circuit = qd.QuantumCircuit(n_qubits, feature_map, ansatz)
+circuit = qd.QuantumCircuit(n_qubits, fm, ansatz)
 observable = qd.kron(X(0), X(1))
 model = qd.QNN(circuit, observable)

 # NOTE: the `QNN` is a torch.nn.Module
 assert isinstance(model, torch.nn.Module)
+print(isinstance(model, torch.nn.Module)) # markdown-exec: hide
 ```

 Differentiation works the same way as any other PyTorch module:

-```python exec="on" source="material-block" html="1" result="json" session="qml"
+```python exec="on" source="material-block" result="json" session="qml"
 values = {"phi": torch.rand(10, requires_grad=True)}

 # the forward pass of the quantum model returns the expectation
 # value of the input observable
 out = model(values)
-print(f"Quantum model output: {out}")
+print(f"Quantum model output: \n{out}\n") # markdown-exec: hide

 # you can compute the gradient with respect to inputs using
 # PyTorch autograd differentiation engine
 dout = torch.autograd.grad(out, values["phi"], torch.ones_like(out), create_graph=True)[0]
-print(f"First-order derivative w.r.t. the feature parameter: {dout}")
+print(f"First-order derivative w.r.t. the feature parameter: \n{dout}")

 # you can also call directly a backward pass to compute derivatives with respect
 # to the variational parameters and use it for implementing variational
@@ -74,12 +82,12 @@
 To run QML on real devices, Qadence offers generalized parameter shift rules (GPSR)
 for arbitrary quantum operations which can be selected when constructing the
 `QNN` model:

-```python exec="on" source="material-block" html="1" result="json" session="qml"
+```python exec="on" source="material-block" result="json" session="qml"
 model = qd.QNN(circuit, observable, diff_mode="gpsr")
 out = model(values)

 dout = torch.autograd.grad(out, values["phi"], torch.ones_like(out), create_graph=True)[0]
-print(f"First-order derivative w.r.t. the feature parameter: {dout}")
+print(f"First-order derivative w.r.t. the feature parameter: \n{dout}")
 ```

 See [here](../advanced_tutorials/differentiability.md) for more details on how the parameter
diff --git a/docs/qml/ml_tools.md b/docs/qml/ml_tools.md
new file mode 100644
index 00000000..6d7d717b
--- /dev/null
+++ b/docs/qml/ml_tools.md
@@ -0,0 +1,213 @@
+## Dataloaders
+
+When using Qadence, you can supply classical data to a quantum machine learning
+algorithm by using a standard PyTorch `DataLoader` instance. Qadence also provides
+the `DictDataLoader` convenience class, which allows building dictionaries of
+`DataLoader` instances and easily iterating over them.
+
+```python exec="on" source="material-block"
+import torch
+from torch.utils.data import DataLoader, TensorDataset
+from qadence.ml_tools import DictDataLoader
+
+def dataloader() -> DataLoader:
+    batch_size = 5
+    x = torch.linspace(0, 1, batch_size).reshape(-1, 1)
+    y = torch.sin(x)
+
+    dataset = TensorDataset(x, y)
+    return DataLoader(dataset, batch_size=batch_size)
+
+
+def dictdataloader() -> DictDataLoader:
+    batch_size = 5
+
+    keys = ["y1", "y2"]
+    dls = {}
+    for k in keys:
+        x = torch.rand(batch_size, 1)
+        y = torch.sin(x)
+        dataset = TensorDataset(x, y)
+        dataloader = DataLoader(dataset, batch_size=batch_size)
+        dls[k] = dataloader
+
+    return DictDataLoader(dls)
+
+n_epochs = 2
+
+# iterate standard DataLoader: each batch is an (x, y) pair
+dl = dataloader()
+for i in range(n_epochs):
+    data = next(iter(dl))
+
+# iterate DictDataLoader: each batch holds one entry per key
+ddl = dictdataloader()
+for i in range(n_epochs):
+    data = next(iter(ddl))
+
+```
+
+## Optimization routines
+
+For training QML models, Qadence also offers a few out-of-the-box routines for optimizing differentiable
+models, _e.g._ `QNN`s and `QuantumModel`s, containing *trainable* and/or *non-trainable* parameters
+(see [the parameters tutorial](../tutorials/parameters.md) for detailed information about parameter types):
+
+* [`train_with_grad`][qadence.ml_tools.train_with_grad] for gradient-based optimization using PyTorch native optimizers
+* [`train_gradient_free`][qadence.ml_tools.train_gradient_free] for gradient-free optimization using
+the [Nevergrad](https://facebookresearch.github.io/nevergrad/) library.
+
+These routines perform training, logging/printing loss metrics and storing intermediate checkpoints of models. In the following, we
+use `train_with_grad` as an example, but the code carries over to the gradient-free routine (a minimal sketch is included after the fitting example below).
+
+Like every other training routine commonly used in machine learning, it requires
+`model`, `data` and an `optimizer` as input arguments.
+However, in addition, it requires a `loss_fn` and a `TrainConfig`.
+A `loss_fn` is required to be a function which expects both a model and data and returns a tuple `(loss, metrics)`, where `loss` is a scalar tensor and `metrics` is a dict of scalars which can be customized too.
+
+```python exec="on" source="material-block"
+import torch
+from itertools import count
+cnt = count()
+criterion = torch.nn.MSELoss()
+
+def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]:
+    next(cnt)  # count loss evaluations
+    x, y = data[0], data[1]
+    out = model(x)
+    loss = criterion(out, y)
+    return loss, {}
+
+```
+
+The [`TrainConfig`][qadence.ml_tools.config.TrainConfig] tells `train_with_grad` which batch size should be used,
+how many epochs to train, at which intervals to print/log metrics and how often to store intermediate checkpoints.
+
+```python exec="on" source="material-block"
+from qadence.ml_tools import TrainConfig
+
+batch_size = 5
+n_epochs = 100
+
+config = TrainConfig(
+    folder="some_path/",
+    max_iter=n_epochs,
+    checkpoint_every=100,
+    write_every=100,
+    batch_size=batch_size,
+)
+```
+
+Let's see it in action with a simple example.
+
+### Fitting a function with a QNN using `ml_tools`
+
+Let's look at a complete example of how to use `train_with_grad` now.
+
+```python exec="on" source="material-block" html="1"
+from pathlib import Path
+import torch
+from itertools import count
+from qadence.constructors import hamiltonian_factory, hea, feature_map
+from qadence import chain, Parameter, QuantumCircuit, Z
+from qadence.models import QNN
+from qadence.ml_tools import train_with_grad, TrainConfig
+import matplotlib.pyplot as plt
+
+n_qubits = 2
+fm = feature_map(n_qubits)
+ansatz = hea(n_qubits=n_qubits, depth=3)
+observable = hamiltonian_factory(n_qubits, detuning=Z)
+circuit = QuantumCircuit(n_qubits, fm, ansatz)
+
+model = QNN(circuit, observable, backend="pyqtorch", diff_mode="ad")
+batch_size = 1
+input_values = {"phi": torch.rand(batch_size, requires_grad=True)}
+pred = model(input_values)
+
+cnt = count()
+criterion = torch.nn.MSELoss()
+optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
+
+def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]:
+    next(cnt)
+    x, y = data[0], data[1]
+    out = model(x)
+    loss = criterion(out, y)
+    return loss, {}
+
+tmp_path = Path("/tmp")
+
+n_epochs = 50
+
+config = TrainConfig(
+    folder=tmp_path,
+    max_iter=n_epochs,
+    checkpoint_every=100,
+    write_every=100,
+    batch_size=batch_size,
+)
+
+batch_size = 25
+
+x = torch.linspace(0, 1, batch_size).reshape(-1, 1)
+y = torch.sin(x)
+
+train_with_grad(model, (x, y), optimizer, config, loss_fn=loss_fn)
+
+plt.clf() # markdown-exec: hide
+plt.plot(x.numpy(), y.numpy())
+plt.plot(x.numpy(), model(x).detach().numpy())
+from docs import docsutils # markdown-exec: hide
+print(docsutils.fig_to_html(plt.gcf())) # markdown-exec: hide
+```
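+
+### Gradient-free optimization
+
+The same model, data, `loss_fn` and `TrainConfig` can be reused with the gradient-free routine. The
+snippet below is a minimal sketch rather than a verbatim API reference: it assumes that
+`train_gradient_free` mirrors the `train_with_grad` signature but takes a
+[Nevergrad](https://facebookresearch.github.io/nevergrad/) optimizer, and the `num_parameters` helper
+used to size the search space is also an assumption.
+
+```python
+import nevergrad as ng
+from qadence.ml_tools import train_gradient_free
+from qadence.ml_tools.parameters import num_parameters  # assumed helper: number of variational parameters
+
+# Gradient-free optimizers are set up with a search-space dimension and an
+# evaluation budget instead of gradients: here one candidate per training epoch.
+ng_optimizer = ng.optimizers.NGOpt(
+    parametrization=num_parameters(model), budget=n_epochs
+)
+
+train_gradient_free(model, (x, y), ng_optimizer, config, loss_fn=loss_fn)
+```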
+
+For users who want to use the low-level API of `qadence`, here is the QNN fitting example from above
+written without `train_with_grad`.
+
+### Fitting a function - Low-level API
+
+```python exec="on" source="material-block"
+from pathlib import Path
+import torch
+from itertools import count
+from qadence.constructors import hamiltonian_factory, hea, feature_map
+from qadence import chain, Parameter, QuantumCircuit, Z
+from qadence.models import QNN
+from qadence.ml_tools import train_with_grad, TrainConfig
+
+n_qubits = 2
+fm = feature_map(n_qubits)
+ansatz = hea(n_qubits=n_qubits, depth=3)
+observable = hamiltonian_factory(n_qubits, detuning=Z)
+circuit = QuantumCircuit(n_qubits, fm, ansatz)
+
+model = QNN(circuit, observable, backend="pyqtorch", diff_mode="ad")
+batch_size = 1
+input_values = {"phi": torch.rand(batch_size, requires_grad=True)}
+pred = model(input_values)
+
+criterion = torch.nn.MSELoss()
+optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
+n_epochs = 50
+cnt = count()
+
+tmp_path = Path("/tmp")
+
+config = TrainConfig(
+    folder=tmp_path,
+    max_iter=n_epochs,
+    checkpoint_every=100,
+    write_every=100,
+    batch_size=batch_size,
+)
+
+# note: with batch_size = 1 this fits a single point; increase it for a meaningful fit
+x = torch.linspace(0, 1, batch_size).reshape(-1, 1)
+y = torch.sin(x)
+
+for i in range(n_epochs):
+    optimizer.zero_grad()  # reset accumulated gradients before each step
+    out = model(x)
+    loss = criterion(out, y)
+    loss.backward()
+    optimizer.step()
+```
diff --git a/docs/qml/qaoa.md b/docs/qml/qaoa.md
index 5dde67b9..e6a57f0a 100644
--- a/docs/qml/qaoa.md
+++ b/docs/qml/qaoa.md
@@ -140,7 +140,7 @@ for i in range(n_epochs):
 ```

 Qadence offers some convenience functions to implement this training loop with advanced
-logging and metrics track features. You can refer to [this](../qml/qml_tools.md) for more details.
+logging and metrics tracking features. You can refer to [this tutorial](../qml/ml_tools.md) for more details.

 ## Results
diff --git a/docs/qml/qcl.md b/docs/qml/qcl.md
index 745bdd6f..b1239ba7 100644
--- a/docs/qml/qcl.md
+++ b/docs/qml/qcl.md
@@ -114,7 +114,7 @@ assert loss.item() < 1e-3
 ```

 Qadence offers some convenience functions to implement this training loop with advanced
-logging and metrics track features. You can refer to [this](../qml/qml_tools.md) for more details.
+logging and metrics tracking features. You can refer to [this tutorial](../qml/ml_tools.md) for more details.

 The quantum model is now trained on the training data points. To determine the quality of the results,
 one can check to see how well it fits the function on the test set.
diff --git a/docs/qml/qml_constructors.md b/docs/qml/qml_constructors.md
new file mode 100644
index 00000000..773ca6a2
--- /dev/null
+++ b/docs/qml/qml_constructors.md
@@ -0,0 +1,191 @@
+# Quantum machine learning constructors
+
+Besides the [arbitrary Hamiltonian constructors](../tutorials/hamiltonians.md), Qadence also provides a complete set of
+program constructors useful for digital-analog quantum machine learning.
+
+## Feature maps
+
+The `feature_map` function can easily create several types of data-encoding blocks. The
+two main types of feature maps use a Fourier basis or a Chebyshev basis.
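+
+As a rough intuition (the exact implementation details may differ): the Fourier basis encodes a feature $x$
+directly as a rotation angle, so expectation values become trigonometric series in $x$, while the Chebyshev
+basis first applies an $\arccos$, as in the $RX(\arccos(x))$ encoding from the [QML introduction](index.md),
+so that terms like $\cos(k \arccos(x)) = T_k(x)$ turn expectation values into polynomial (Chebyshev)
+functions of $x$.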
+
+```python exec="on" source="material-block" html="1" session="fms"
+from qadence import feature_map, BasisSet, chain
+from qadence.draw import display
+
+n_qubits = 3
+
+fourier_fm = feature_map(n_qubits, fm_type=BasisSet.FOURIER)
+
+chebyshev_fm = feature_map(n_qubits, fm_type=BasisSet.CHEBYSHEV)
+
+block = chain(fourier_fm, chebyshev_fm)
+from qadence.draw import html_string # markdown-exec: hide
+print(html_string(block, size="6,4")) # markdown-exec: hide
+```
+
+A custom encoding function can also be passed with `sympy`:
+
+```python exec="on" source="material-block" html="1" session="fms"
+from sympy import asin, Function
+
+n_qubits = 3
+
+# Using a pre-defined sympy Function
+custom_fm_0 = feature_map(n_qubits, fm_type=asin)
+
+# Creating a custom subclass of Function
+class custom_func(Function):
+    @classmethod
+    def eval(cls, x):
+        return asin(x) + x**2
+
+custom_fm_1 = feature_map(n_qubits, fm_type=custom_func)
+
+block = chain(custom_fm_0, custom_fm_1)
+from qadence.draw import html_string # markdown-exec: hide
+print(html_string(block, size="6,4")) # markdown-exec: hide
+```
+
+Furthermore, the `reupload_scaling` argument can be used to change the scaling applied to each qubit
+in the support of the feature map. The default scalings can be chosen from the `ReuploadScaling` enumeration.
+
+```python exec="on" source="material-block" html="1" session="fms"
+from qadence import ReuploadScaling
+from qadence.draw import display
+
+n_qubits = 5
+
+# Default constant value
+fm_constant = feature_map(n_qubits, fm_type=BasisSet.FOURIER, reupload_scaling=ReuploadScaling.CONSTANT)
+
+# Linearly increasing scaling
+fm_tower = feature_map(n_qubits, fm_type=BasisSet.FOURIER, reupload_scaling=ReuploadScaling.TOWER)
+
+# Exponentially increasing scaling
+fm_exp = feature_map(n_qubits, fm_type=BasisSet.FOURIER, reupload_scaling=ReuploadScaling.EXP)
+
+block = chain(fm_constant, fm_tower, fm_exp)
+from qadence.draw import html_string # markdown-exec: hide
+print(html_string(block, size="6,4")) # markdown-exec: hide
+```
+
+A custom scaling can also be defined with a function taking an `int` input and returning an `int` or `float` output.
+
+```python exec="on" source="material-block" html="1" session="fms"
+n_qubits = 5
+
+def custom_scaling(i: int) -> int | float:
+    """Sqrt(i+1)"""
+    return (i+1) ** (0.5)
+
+# Custom scaling function
+fm_custom = feature_map(n_qubits, fm_type=BasisSet.CHEBYSHEV, reupload_scaling=custom_scaling)
+
+from qadence.draw import html_string # markdown-exec: hide
+print(html_string(fm_custom, size="6,4")) # markdown-exec: hide
+```
+
+A full description of the remaining arguments can be found in the [`feature_map` API reference][qadence.constructors.feature_map]. We provide an example below.
+
+```python exec="on" source="material-block" html="1" session="fms"
+from qadence import RY
+
+n_qubits = 5
+
+# Feature map with all main arguments set explicitly
+fm_full = feature_map(
+    n_qubits = n_qubits,
+    support = tuple(reversed(range(n_qubits))), # Reverse the qubit support to run the scaling from bottom to top
+    param = "x", # Change the name of the parameter
+    op = RY, # Change the rotation gate between RX, RY, RZ or PHASE
+    fm_type = BasisSet.CHEBYSHEV,
+    reupload_scaling = ReuploadScaling.EXP,
+    feature_range = (-1.0, 2.0), # Range the input data comes from
+    target_range = (1.0, 3.0), # Range the encoder assumes as the natural range
+    multiplier = 5.0 # Extra multiplier, which can also be a Parameter
+)
+
+from qadence.draw import html_string # markdown-exec: hide
+print(html_string(fm_full, size="6,4")) # markdown-exec: hide
+```
+
+## Hardware-efficient ansatz
+
+Ansatz blocks for quantum machine learning are typically built following the Hardware-Efficient Ansatz formalism (HEA).
+Both fully digital and digital-analog HEAs can easily be built with the `hea` function. By default,
+the digital version is returned:
+
+```python exec="on" source="material-block" html="1" session="ansatz"
+from qadence import hea
+from qadence.draw import display
+
+n_qubits = 3
+depth = 2
+
+ansatz = hea(n_qubits, depth)
+from qadence.draw import html_string # markdown-exec: hide
+print(html_string(ansatz, size="8,4")) # markdown-exec: hide
+```
+
+As seen above, the rotation layers are automatically parameterized, and the prefix `"theta"` can be changed with the `param_prefix` argument.
+
+Furthermore, both the single-qubit rotations and the two-qubit entangler can be customized with the `operations` and `entangler` arguments. The operations can be passed as a list of single-qubit rotations, while the entangler should be either `CNOT`, `CZ`, `CRX`, `CRY`, `CRZ` or `CPHASE`.
+
+```python exec="on" source="material-block" html="1" session="ansatz"
+from qadence import RX, RY, CPHASE
+
+ansatz = hea(
+    n_qubits=n_qubits,
+    depth=depth,
+    param_prefix="phi",
+    operations=[RX, RY, RX],
+    entangler=CPHASE
+)
+from qadence.draw import html_string # markdown-exec: hide
+print(html_string(ansatz, size="8,4")) # markdown-exec: hide
+```
+
+Having a truly *hardware-efficient* ansatz means that the entangling operation can be chosen according to each device's native interactions. Besides digital operations, in Qadence it is also possible to build digital-analog HEAs with the entanglement produced by the natural evolution of a set of interacting qubits, as natively implemented in neutral atom devices. As with other digital-analog functions, this can be controlled with the `strategy` argument which can be chosen from the [`Strategy`](../qadence/types.md) enum type. Currently, only `Strategy.DIGITAL` and `Strategy.SDAQC` are available. By default, calling `strategy = Strategy.SDAQC` will use a global entangling Hamiltonian with Ising-like $NN$ interactions and constant interaction strength:
+
+```python exec="on" source="material-block" html="1" session="ansatz"
+from qadence import Strategy
+
+ansatz = hea(
+    n_qubits,
+    depth=depth,
+    strategy=Strategy.SDAQC
+)
+from qadence.draw import html_string # markdown-exec: hide
+print(html_string(ansatz, size="8,4")) # markdown-exec: hide
+```
+
+Note that, by default, only the time parameter is automatically parameterized when building a digital-analog HEA. However, as described in the [Hamiltonians tutorial](../tutorials/hamiltonians.md), arbitrary interaction Hamiltonians can be easily built with the `hamiltonian_factory` function, with customized or fully parameterized interactions, and these can be directly passed as the `entangler` for a customizable digital-analog HEA.
+
+```python exec="on" source="material-block" html="1" session="ansatz"
+from qadence import hamiltonian_factory, Interaction, N, Register, hea
+
+# Build a parameterized neutral-atom Hamiltonian following a honeycomb_lattice:
+register = Register.honeycomb_lattice(1, 1)
+
+entangler = hamiltonian_factory(
+    register,
+    interaction=Interaction.NN,
+    detuning=N,
+    interaction_strength="e",
+    detuning_strength="n"
+)
+
+# Build a fully parameterized Digital-Analog HEA:
+n_qubits = register.n_qubits
+depth = 2
+
+ansatz = hea(
+    n_qubits=register.n_qubits,
+    depth=depth,
+    operations=[RX, RY, RX],
+    entangler=entangler,
+    strategy=Strategy.SDAQC
+)
+from qadence.draw import html_string # markdown-exec: hide
+print(html_string(ansatz, size="8,4")) # markdown-exec: hide
+```
diff --git a/docs/qml/qml_tools.md b/docs/qml/qml_tools.md
deleted file mode 100644
index 81d164bb..00000000
--- a/docs/qml/qml_tools.md
+++ /dev/null
@@ -1,324 +0,0 @@
-Qadence offers a wide range of utilities for helping building and researching
-quantum machine learning algorithms, including:
-
-* a set of constructors for circuits commonly used in quantum machine learning
-* a set of tools for optimizing quantum neural networks and loading classical data into a QML algorithm
-
-## Quantum machine learning constructors
-
-Besides the [arbitrary Hamiltonian constructors](../tutorials/hamiltonians.md), Qadence also provides a complete set of
-program constructors useful for digital-analog quantum machine learning programs.
-
-### Feature maps
-
-A few feature maps are directly available for loading classical data into quantum circuits by encoding them
-into gate rotation angles.
-
-```python exec="on" source="material-block" result="json" session="fms"
-from qadence import feature_map
-
-n_qubits = 3
-
-fm = feature_map(n_qubits, fm_type="fourier")
-print(f"Fourier = {fm}") # markdown-exec: hide
-
-fm = feature_map(n_qubits, fm_type="chebyshev")
-print(f"Chebyshev {fm}") # markdown-exec: hide
-
-fm = feature_map(n_qubits, fm_type="tower")
-print(f"Tower {fm}") # markdown-exec: hide
-```
-
-### Hardware-efficient ansatz
-
-Ansatze blocks for quantum machine-learning are typically built following the Hardware-Efficient Ansatz formalism (HEA).
-Both fully digital and digital-analog HEAs can easily be built with the `hea` function. By default,
-the digital version is returned:
-
-```python exec="on" source="material-block" html="1" session="ansatz"
-from qadence import hea
-from qadence.draw import display
-
-n_qubits = 3
-depth = 2
-
-ansatz = hea(n_qubits, depth)
-from qadence.draw import html_string # markdown-exec: hide
-print(html_string(ansatz, size="4,4")) # markdown-exec: hide
-```
-
-As seen above, the rotation layers are automatically parameterized, and the prefix `"theta"` can be changed with the `param_prefix` argument.
-
-Furthermore, both the single-qubit rotations and the two-qubit entangler can be customized with the `operations` and `entangler` argument. The operations can be passed as a list of single-qubit rotations, while the entangler should be either `CNOT`, `CZ`, `CRX`, `CRY`, `CRZ` or `CPHASE`.
- -```python exec="on" source="material-block" html="1" session="ansatz" -from qadence import RX, RY, CPHASE - -ansatz = hea( - n_qubits=n_qubits, - depth=depth, - param_prefix="phi", - operations=[RX, RY, RX], - entangler=CPHASE -) -from qadence.draw import html_string # markdown-exec: hide -print(html_string(ansatz, size="4,4")) # markdown-exec: hide -``` - -Having a truly *hardware-efficient* ansatz means that the entangling operation can be chosen according to each device's native interactions. Besides digital operations, in Qadence it is also possible to build digital-analog HEAs with the entanglement produced by the natural evolution of a set of interacting qubits, as natively implemented in neutral atom devices. As with other digital-analog functions, this can be controlled with the `strategy` argument which can be chosen from the [`Strategy`](../qadence/types.md) enum type. Currently, only `Strategy.DIGITAL` and `Strategy.SDAQC` are available. By default, calling `strategy = Strategy.SDAQC` will use a global entangling Hamiltonian with Ising-like NN interactions and constant interaction strength, - -```python exec="on" source="material-block" html="1" session="ansatz" -from qadence import Strategy - -ansatz = hea( - n_qubits, - depth=depth, - strategy=Strategy.SDAQC -) -from qadence.draw import html_string # markdown-exec: hide -print(html_string(ansatz, size="4,4")) # markdown-exec: hide -``` - -Note that, by default, only the time-parameter is automatically parameterized when building a digital-analog HEA. However, as described in the [Hamiltonians tutorial](../tutorials/hamiltonians.md), arbitrary interaction Hamiltonians can be easily built with the `hamiltonian_factory` function, with both customized or fully parameterized interactions, and these can be directly passed as the `entangler` for a customizable digital-analog HEA. - -```python exec="on" source="material-block" html="1" session="ansatz" -from qadence import hamiltonian_factory, Interaction, N, Register, hea - -# Build a parameterized neutral-atom Hamiltonian following a honeycomb_lattice: -register = Register.honeycomb_lattice(1, 1) - -entangler = hamiltonian_factory( - register, - interaction=Interaction.NN, - detuning=N, - interaction_strength="e", - detuning_strength="n" -) - -# Build a fully parameterized Digital-Analog HEA: -n_qubits = register.n_qubits -depth = 2 - -ansatz = hea( - n_qubits=register.n_qubits, - depth=depth, - operations=[RX, RY, RX], - entangler=entangler, - strategy=Strategy.SDAQC -) -from qadence.draw import html_string # markdown-exec: hide -print(html_string(ansatz, size="4,4")) # markdown-exec: hide -``` - -## Machine Learning Tools - -### Dataloaders - -When using `qadence`, you can supply classical data to a quantum machine learning -algorithm by using a standard PyTorch `DataLoader` instance. Qadence also provides -the `DictDataLoader` convenience class which allows -to build dictionaries of `DataLoader`s instances and easily iterate over them. 
- -```python exec="on" source="material-block" result="json" -import torch -from torch.utils.data import DataLoader, TensorDataset -from qadence.ml_tools import DictDataLoader - -def dataloader() -> DataLoader: - batch_size = 5 - x = torch.linspace(0, 1, batch_size).reshape(-1, 1) - y = torch.sin(x) - - dataset = TensorDataset(x, y) - return DataLoader(dataset, batch_size=batch_size) - - -def dictdataloader() -> DictDataLoader: - batch_size = 5 - - keys = ["y1", "y2"] - dls = {} - for k in keys: - x = torch.rand(batch_size, 1) - y = torch.sin(x) - dataset = TensorDataset(x, y) - dataloader = DataLoader(dataset, batch_size=batch_size) - dls[k] = dataloader - - return DictDataLoader(dls) - -n_epochs = 2 - -# iterate standard DataLoader -dl = dataloader() -for i in range(n_epochs): - data = next(iter(dl)) - -# iterate DictDataLoader -ddl = dictdataloader() -for i in range(n_epochs): - data = next(iter(ddl)) - -``` - -### Optimization routines - -For training QML models, `qadence` also offers a few out-of-the-box routines for optimizing differentiable -models like `QNN`s and `QuantumModel`s containing either *trainable* and/or *non-trainable* parameters -(you can refer to [this](../tutorials/parameters) for a refresh about different parameter types): - -* [`train_with_grad`][qadence.ml_tools.train_with_grad] for gradient-based optimization using PyTorch native optimizers -* [`train_gradient_free`][qadence.ml_tools.train_gradient_free] for gradient-free optimization using -the [Nevergrad](https://facebookresearch.github.io/nevergrad/) library. - -These routines performs training, logging/printing loss metrics and storing intermediate checkpoints of models. In the following, we -use `train_with_grad` as example but the code can be used directly with the gradient-free routine. - -As every other training routine commonly used in Machine Learning, it requires -`model`, `data` and an `optimizer` as input arguments. -However, in addition, it requires a `loss_fn` and a `TrainConfig`. -A `loss_fn` is required to be a function which expects both a model and data and returns a tuple of (loss, metrics: ``), where `metrics` is a dict of scalars which can be customized too. - -```python exec="on" source="material-block" result="json" -import torch -from itertools import count -cnt = count() -criterion = torch.nn.MSELoss() - -def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]: - next(cnt) - x, y = data[0], data[1] - out = model(x) - loss = criterion(out, y) - return loss, {} - -``` - -The [`TrainConfig`][qadence.ml_tools.config.TrainConfig] tells `train_with_grad` what batch_size should be used, -how many epochs to train, in which intervals to print/log metrics and how often to store intermediate checkpoints. - -```python exec="on" source="material-block" result="json" -from qadence.ml_tools import TrainConfig - -batch_size = 5 -n_epochs = 100 - -config = TrainConfig( - folder="some_path/", - max_iter=n_epochs, - checkpoint_every=100, - write_every=100, - batch_size=batch_size, -) -``` - -Let's see it in action with a simple example. - -#### Fitting a funtion with a QNN using `ml_tools` - -Let's look at a complete example of how to use `train_with_grad` now. 
- -```python exec="on" source="material-block" result="json" -from pathlib import Path -import torch -from itertools import count -from qadence.constructors import hamiltonian_factory, hea, feature_map -from qadence import chain, Parameter, QuantumCircuit, Z -from qadence.models import QNN -from qadence.ml_tools import train_with_grad, TrainConfig -import matplotlib.pyplot as plt - -n_qubits = 2 -fm = feature_map(n_qubits) -ansatz = hea(n_qubits=n_qubits, depth=3) -observable = hamiltonian_factory(n_qubits, detuning=Z) -circuit = QuantumCircuit(n_qubits, fm, ansatz) - -model = QNN(circuit, observable, backend="pyqtorch", diff_mode="ad") -batch_size = 1 -input_values = {"phi": torch.rand(batch_size, requires_grad=True)} -pred = model(input_values) - -cnt = count() -criterion = torch.nn.MSELoss() -optimizer = torch.optim.Adam(model.parameters(), lr=0.1) - -def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]: - next(cnt) - x, y = data[0], data[1] - out = model(x) - loss = criterion(out, y) - return loss, {} - -tmp_path = Path("/tmp") - -n_epochs = 5 - -config = TrainConfig( - folder=tmp_path, - max_iter=n_epochs, - checkpoint_every=100, - write_every=100, - batch_size=batch_size, -) - -batch_size = 25 - -x = torch.linspace(0, 1, batch_size).reshape(-1, 1) -y = torch.sin(x) - -train_with_grad(model, (x, y), optimizer, config, loss_fn=loss_fn) - -plt.plot(y.numpy()) -plt.plot(model(input_values).detach().numpy()) -``` - -For users who want to use the low-level API of `qadence`, here is the example from above -written without `train_with_grad`. - -#### Fitting a function - Low-level API - -```python exec="on" source="material-block" result="json" -from pathlib import Path -import torch -from itertools import count -from qadence.constructors import hamiltonian_factory, hea, feature_map -from qadence import chain, Parameter, QuantumCircuit, Z -from qadence.models import QNN -from qadence.ml_tools import train_with_grad, TrainConfig - -n_qubits = 2 -fm = feature_map(n_qubits) -ansatz = hea(n_qubits=n_qubits, depth=3) -observable = hamiltonian_factory(n_qubits, detuning=Z) -circuit = QuantumCircuit(n_qubits, fm, ansatz) - -model = QNN(circuit, observable, backend="pyqtorch", diff_mode="ad") -batch_size = 1 -input_values = {"phi": torch.rand(batch_size, requires_grad=True)} -pred = model(input_values) - -criterion = torch.nn.MSELoss() -optimizer = torch.optim.Adam(model.parameters(), lr=0.1) -n_epochs=50 -cnt = count() - -tmp_path = Path("/tmp") - -config = TrainConfig( - folder=tmp_path, - max_iter=n_epochs, - checkpoint_every=100, - write_every=100, - batch_size=batch_size, -) - -x = torch.linspace(0, 1, batch_size).reshape(-1, 1) -y = torch.sin(x) - -for i in range(n_epochs): - out = model(x) - loss = criterion(out, y) - loss.backward() - optimizer.step() -``` diff --git a/docs/tutorials/parameters.md b/docs/tutorials/parameters.md index 6cd1f7ac..f9669de9 100644 --- a/docs/tutorials/parameters.md +++ b/docs/tutorials/parameters.md @@ -261,7 +261,7 @@ print(html_string(circuit)) # markdown-exec: hide print(html_string(circuit)) # markdown-exec: hide ``` -The `hea` function will be further explored in the [QML Constructors tutorial](../qml/qml_tools.md). +The `hea` function will be further explored in the [QML Constructors tutorial](../qml/qml_constructors.md). 
## Parametric observables

diff --git a/docs/tutorials/quantummodels.md b/docs/tutorials/quantummodels.md
index 836e7ca8..326de478 100644
--- a/docs/tutorials/quantummodels.md
+++ b/docs/tutorials/quantummodels.md
@@ -85,5 +85,5 @@ print(f"{ex = }") # markdown-exec: hide
 ### Quantum Neural Network (QNN)

 The `QNN` is a subclass of the `QuantumModel` geared towards quantum machine learning and parameter optimisation. See the
-[machine learning tools](../qml/qml_tools.md) section or the [`QNN` API reference][qadence.models.QNN] for more detailed
+[quantum machine learning](../qml/index.md) section or the [`QNN` API reference][qadence.models.QNN] for more detailed
 information, and the [parametric program tutorial](parameters.md) for parameterization.
diff --git a/mkdocs.yml b/mkdocs.yml
index c09f5e24..faf74e56 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -28,9 +28,10 @@ nav:

   - Variational quantum algorithms:
     - qml/index.md
+    - Constructors: qml/qml_constructors.md
+    - Training tools: qml/ml_tools.md
     - Quantum circuit learning: qml/qcl.md
     - Solving MaxCut with QAOA: qml/qaoa.md
-    - Tools for quantum machine learning: qml/qml_tools.md

   - Advanced Tutorials:
     - Quantum circuits differentiation: advanced_tutorials/differentiability.md