diff --git a/pyproject.toml b/pyproject.toml
index ea9004e57..3579b0adc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -39,7 +39,8 @@ dependencies = [
   "jsonschema",
   "nevergrad",
   "scipy<1.11",
-  "pyqtorch==0.5.0"
+  "pyqtorch==0.5.0",
+  "matplotlib"
 ]

 [tool.hatch.metadata]
@@ -55,7 +56,6 @@ visualization = [
   # "latex2svg @ git+https://github.com/Moonbase59/latex2svg.git#egg=latex2svg",
   # "scour",
 ]
-scipy = ["scipy<1.11"]
 all = [
   "pulser>=0.12.0",
   "amazon-braket-sdk",
@@ -74,15 +74,11 @@ dependencies = [
   "pytest-cov",
   "pytest-mypy",
   "pytest-xdist",
-  "nbconvert",
   "ipykernel",
-  "jupyter_contrib_nbextensions",
   "pre-commit",
   "black",
   "isort",
   "ruff",
-  "notebook<7.0",
-  "dill",
 ]

 features = ["all"]
@@ -123,9 +119,6 @@ dependencies = [
   "mkdocs-jupyter",
   "mkdocs-exclude",
   "markdown-exec",
-  "notebook<7",
-  "jupyter_contrib_nbextensions",
-  "dill",
 ]

 features = ["pulser", "braket", "visualization"]
diff --git a/qadence/ml_tools/config.py b/qadence/ml_tools/config.py
index 439ead1fe..dfbb71199 100644
--- a/qadence/ml_tools/config.py
+++ b/qadence/ml_tools/config.py
@@ -38,6 +38,8 @@ class TrainConfig:
     """A boolean function which evaluates a given validation metric is satisfied"""
     trainstop_criterion: Optional[Callable] = None
     """A boolean function which evaluates a given training stopping metric is satisfied"""
+    batch_size: int = 1
+    """The batch size to use when passing a list/tuple of torch.Tensors."""

     def __post_init__(self) -> None:
         if self.folder:
diff --git a/qadence/ml_tools/data.py b/qadence/ml_tools/data.py
index 08fc2c954..17d9739ba 100644
--- a/qadence/ml_tools/data.py
+++ b/qadence/ml_tools/data.py
@@ -3,7 +3,7 @@
 from dataclasses import dataclass

 import torch
-from torch.utils.data import DataLoader
+from torch.utils.data import DataLoader, TensorDataset


 @dataclass
@@ -25,3 +25,8 @@ def __iter__(self) -> DictDataLoader:

     def __next__(self) -> dict[str, torch.Tensor]:
         return {key: next(it) for key, it in self.iters.items()}
+
+
+def to_dataloader(x: torch.Tensor, y: torch.Tensor, batch_size: int = 1) -> DataLoader:
+    """Convert two torch tensors x and y to a DataLoader."""
+    return DataLoader(TensorDataset(x, y), batch_size=batch_size)
diff --git a/qadence/ml_tools/train_grad.py b/qadence/ml_tools/train_grad.py
index 9e8aa6c07..e17edb8c2 100644
--- a/qadence/ml_tools/train_grad.py
+++ b/qadence/ml_tools/train_grad.py
@@ -3,6 +3,7 @@
 from typing import Callable

 from rich.progress import BarColumn, Progress, TaskProgressColumn, TextColumn, TimeRemainingColumn
+from torch import Tensor
 from torch.nn import Module
 from torch.optim import Optimizer
 from torch.utils.data import DataLoader
@@ -20,7 +21,7 @@

 def train(
     model: Module,
-    dataloader: DictDataLoader | DataLoader | None,
+    dataloader: DictDataLoader | DataLoader | list[Tensor] | tuple[Tensor, Tensor] | None,
     optimizer: Optimizer,
     config: TrainConfig,
     loss_fn: Callable,
@@ -57,6 +58,54 @@
            called every `config.write_every` iterations. The function must have the
            signature `write_tensorboard(writer, loss, metrics, iteration)` (see the example below).
+
+    Example:
+    ```python exec="on" source="material-block"
+    from pathlib import Path
+    import torch
+    from itertools import count
+    from qadence.constructors import total_magnetization, hea, feature_map
+    from qadence import chain, Parameter, QuantumCircuit
+    from qadence.models import QNN
+    from qadence.ml_tools import train_with_grad, TrainConfig
+
+    n_qubits = 2
+    fm = feature_map(n_qubits)
+    ansatz = hea(n_qubits=n_qubits, depth=3)
+    observable = total_magnetization(n_qubits)
+    circuit = QuantumCircuit(n_qubits, fm, ansatz)
+
+    model = QNN(circuit, observable, backend="pyqtorch", diff_mode="ad")
+    batch_size = 1
+    input_values = {"phi": torch.rand(batch_size, requires_grad=True)}
+    pred = model(input_values)
+
+    # let's prepare the train routine
+
+    cnt = count()
+    criterion = torch.nn.MSELoss()
+    optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
+
+    def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]:
+        next(cnt)
+        x, y = data[0], data[1]
+        out = model(x)
+        loss = criterion(out, y)
+        return loss, {}
+    tmp_path = Path("/tmp")
+    n_epochs = 5
+    config = TrainConfig(
+        folder=tmp_path,
+        max_iter=n_epochs,
+        checkpoint_every=100,
+        write_every=100,
+        batch_size=batch_size,
+    )
+    batch_size = 25
+    x = torch.linspace(0, 1, batch_size).reshape(-1, 1)
+    y = torch.sin(x)
+    train_with_grad(model, (x, y), optimizer, config, loss_fn=loss_fn)
+    ```
     """

     assert loss_fn is not None, "Provide a valid loss function"

@@ -79,6 +128,12 @@
         TaskProgressColumn(),
         TimeRemainingColumn(elapsed_when_finished=True),
     )
+    if isinstance(dataloader, (list, tuple)):
+        from qadence.ml_tools.data import to_dataloader
+
+        assert len(dataloader) == 2, "Please provide exactly two torch tensors."
+        x, y = dataloader
+        dataloader = to_dataloader(x=x, y=y, batch_size=config.batch_size)

     with progress:
         dl_iter = iter(dataloader) if isinstance(dataloader, DictDataLoader) else None
diff --git a/qadence/operations.py b/qadence/operations.py
index d9a04cc97..8caaebb5b 100644
--- a/qadence/operations.py
+++ b/qadence/operations.py
@@ -1171,6 +1171,8 @@ def AnalogRot(
         ConstantAnalogRotation
     """
     q = _cast(QubitSupport, qubit_support)
+    if isinstance(duration, str):
+        duration = Parameter(duration)
     alpha = duration * sympy.sqrt(omega**2 + delta**2) / 1000  # type: ignore [operator]

     ps = ParamMap(alpha=alpha, duration=duration, omega=omega, delta=delta, phase=phase)
diff --git a/qadence/states.py b/qadence/states.py
index 89982a307..b7ee2eb34 100644
--- a/qadence/states.py
+++ b/qadence/states.py
@@ -310,7 +310,7 @@ def random_state(

     n_qubits = 2

-    # The default is StateGeneratorType.HAARMEASUREFAST
+    # The default is StateGeneratorType.HAAR_MEASURE_FAST
     state = random_state(n_qubits=n_qubits)
     print(state)

@@ -322,7 +322,7 @@ def random_state(

     if type == StateGeneratorType.HAAR_MEASURE_FAST:
         state = concat(tuple(_rand_haar_fast(n_qubits) for _ in range(batch_size)), dim=0)
-    elif type == StateGeneratorType.HAAR_MEASURE_FAST:
+    elif type == StateGeneratorType.HAAR_MEASURE_SLOW:
         state = concat(tuple(_rand_haar_slow(n_qubits) for _ in range(batch_size)), dim=0)
     elif type == StateGeneratorType.RANDOM_ROTATIONS:
         state = _run_state(_abstract_random_state(n_qubits, batch_size), backend)  # type: ignore
diff --git a/readthedocs.yml b/readthedocs.yml
index f68f10e4d..718c036c6 100644
--- a/readthedocs.yml
+++ b/readthedocs.yml
@@ -1,15 +1,12 @@
 version: 2
+
 build:
   os: "ubuntu-22.04"
   tools:
-    python: "3.10"
-  apt_packages:
-    - graphviz
-  commands:
-    - pip install hatch
-    - hatch -v run docs:build
-    - mkdir _readthedocs/
-    - mv site _readthedocs/html
+    python: "mambaforge-22.9"
+
+conda:
+  environment: docs/environment.yml

 mkdocs:
-  configuration: mkdocs.yml
+  configuration: mkdocs.yml
diff --git a/tests/backends/pulser_basic/test_entanglement.py b/tests/backends/pulser_basic/test_entanglement.py
index 189548a0a..d2a68a457 100644
--- a/tests/backends/pulser_basic/test_entanglement.py
+++ b/tests/backends/pulser_basic/test_entanglement.py
@@ -11,7 +11,7 @@
 from qadence.backends.pulser import Device
 from qadence.blocks import AbstractBlock, chain
 from qadence.divergences import js_divergence
-from qadence.operations import RY, AnalogRot, entangle, wait
+from qadence.operations import RY, entangle
 from qadence.register import Register


@@ -24,35 +24,6 @@
            Register(2),
            Counter({"00": 250, "11": 250}),
        ),
-        # Four qubits GHZ state
-        (
-            chain(
-                AnalogRot(duration=100, omega=5 * torch.pi, delta=0, phase=0),
-                wait(2300),
-                AnalogRot(duration=300, omega=5 * torch.pi, delta=0, phase=0),
-            ),
-            Register.square(qubits_side=2),
-            Counter(
-                {
-                    "1111": 145,
-                    "1110": 15,
-                    "1101": 15,
-                    "1100": 15,
-                    "1011": 15,
-                    "1010": 15,
-                    "1001": 15,
-                    "1000": 15,
-                    "0111": 15,
-                    "0110": 15,
-                    "0101": 15,
-                    "0100": 15,
-                    "0011": 15,
-                    "0010": 15,
-                    "0001": 15,
-                    "0000": 145,
-                }
-            ),
-        ),
    ],
 )
 def test_entanglement(blocks: AbstractBlock, register: Register, goal: Counter) -> None:
diff --git a/tests/backends/pulser_basic/test_quantum_pulser.py b/tests/backends/pulser_basic/test_quantum_pulser.py
index ca4a09061..aa6ed5cc1 100644
--- a/tests/backends/pulser_basic/test_quantum_pulser.py
+++ b/tests/backends/pulser_basic/test_quantum_pulser.py
@@ -1,28 +1,18 @@
 from __future__ import annotations

-from collections import Counter
-
 import pytest
 import torch
-from metrics import JS_ACCEPTANCE

 from qadence import (
     RX,
-    RY,
-    AnalogRot,
     BackendName,
     FeatureParameter,
     QuantumCircuit,
-    Register,
     VariationalParameter,
     backend_factory,
-    chain,
-    entangle,
     kron,
     total_magnetization,
 )
-from qadence.backends.pulser import Device
-from qadence.divergences import js_divergence


 @pytest.fixture
@@ -35,53 +25,6 @@ def batched_circuit() -> QuantumCircuit:
     return QuantumCircuit(n_qubits, block)


-@pytest.mark.parametrize(
-    "circuit,goal",
-    [
-        (
-            QuantumCircuit(
-                Register(2), chain(entangle(383, qubit_support=(0, 1)), RY(0, 3 * torch.pi / 2))
-            ),
-            Counter({"00": 250, "11": 250}),
-        ),
-        (
-            QuantumCircuit(
-                Register.square(qubits_side=2),
-                chain(
-                    entangle(2488),
-                    AnalogRot(duration=300, omega=5 * torch.pi, delta=0, phase=0),
-                ),
-            ),
-            Counter(
-                {
-                    "1111": 145,
-                    "1110": 15,
-                    "1101": 15,
-                    "1100": 15,
-                    "1011": 15,
-                    "1010": 15,
-                    "1001": 15,
-                    "1000": 15,
-                    "0111": 15,
-                    "0110": 15,
-                    "0101": 15,
-                    "0100": 15,
-                    "0011": 15,
-                    "0010": 15,
-                    "0001": 15,
-                    "0000": 145,
-                }
-            ),
-        ),
-    ],
-)
-def test_pulser_sequence_sample(circuit: QuantumCircuit, goal: Counter) -> None:
-    config = {"device_type": Device.REALISTIC}
-    backend = backend_factory(backend=BackendName.PULSER, diff_mode=None, configuration=config)
-    sample = backend.sample(backend.circuit(circuit), {}, n_shots=500)[0]
-    assert js_divergence(sample, goal) < JS_ACCEPTANCE
-
-
 def test_expectation_batched(batched_circuit: QuantumCircuit) -> None:
     batch_size = 3
     values = {"phi": torch.tensor([torch.pi / 5, torch.pi / 4, torch.pi / 3])}
diff --git a/tests/ml_tools/test_train.py b/tests/ml_tools/test_train.py
index ed98c9455..8ac8c37c5 100644
--- a/tests/ml_tools/test_train.py
+++ b/tests/ml_tools/test_train.py
@@ -158,3 +158,36 @@ def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, d
     model, optimizer = train_with_grad(model, data, optimizer, config, loss_fn=loss_fn)
     x = torch.rand(1)
     assert torch.allclose(torch.sin(x), model(x), rtol=1e-1, atol=1e-1)
+
+
+@pytest.mark.flaky(max_runs=10)
+def test_train_tensor_tuple(tmp_path: Path, Basic: torch.nn.Module) -> None:
+    model = Basic
+    batch_size = 25
+    x = torch.linspace(0, 1, batch_size).reshape(-1, 1)
+    y = torch.sin(x)
+
+    cnt = count()
+    criterion = torch.nn.MSELoss()
+    optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
+
+    def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]:
+        next(cnt)
+        x, y = data[0], data[1]
+        out = model(x)
+        loss = criterion(out, y)
+        return loss, {}
+
+    n_epochs = 100
+    config = TrainConfig(
+        folder=tmp_path,
+        max_iter=n_epochs,
+        checkpoint_every=100,
+        write_every=100,
+        batch_size=batch_size,
+    )
+    train_with_grad(model, (x, y), optimizer, config, loss_fn=loss_fn)
+    assert next(cnt) == n_epochs
+
+    x = torch.rand(5, 1)
+    assert torch.allclose(torch.sin(x), model(x), rtol=1e-1, atol=1e-1)
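
Note (illustrative, not part of the patch): a minimal sketch of how the `to_dataloader` helper added in `qadence/ml_tools/data.py` behaves. It wraps the two tensors in a `TensorDataset`, so each batch comes back as an `[x_batch, y_batch]` pair, which is why the `loss_fn` examples above unpack `data[0]` and `data[1]`.

```python
import torch
from qadence.ml_tools.data import to_dataloader  # helper added in this diff

# Two aligned tensors: 25 samples of a 1D feature and its sine target.
x = torch.linspace(0, 1, 25).reshape(-1, 1)
y = torch.sin(x)

# to_dataloader wraps (x, y) in a TensorDataset and returns a plain DataLoader.
loader = to_dataloader(x, y, batch_size=5)

for batch in loader:
    xb, yb = batch  # each batch is an [x_batch, y_batch] pair
    print(xb.shape, yb.shape)  # torch.Size([5, 1]) torch.Size([5, 1])
```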
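Similarly, a hedged sketch of the `AnalogRot` change: with the added `isinstance(duration, str)` check, a parameter name can now be passed as the duration and is promoted to a `Parameter`. The keyword arguments mirror those used in the removed pulser tests; the parameter name `"t"` is only an illustration.

```python
import torch
from qadence import AnalogRot

# Fixed duration, as in the removed tests.
fixed = AnalogRot(duration=100, omega=5 * torch.pi, delta=0, phase=0)

# With this patch, a string duration is promoted to Parameter("t"),
# so the rotation time becomes a parameter rather than a fixed number.
parametric = AnalogRot(duration="t", omega=5 * torch.pi, delta=0, phase=0)
print(parametric)
```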