Latest changes.
RolandMacDoland committed Oct 4, 2023
1 parent c07aaad commit beb4aeb
Showing 10 changed files with 110 additions and 109 deletions.
11 changes: 2 additions & 9 deletions pyproject.toml
@@ -39,7 +39,8 @@ dependencies = [
"jsonschema",
"nevergrad",
"scipy<1.11",
"pyqtorch==0.5.0"
"pyqtorch==0.5.0",
"matplotlib"
]

[tool.hatch.metadata]
@@ -55,7 +56,6 @@ visualization = [
# "latex2svg @ git+https://github.com/Moonbase59/latex2svg.git#egg=latex2svg",
# "scour",
]
scipy = ["scipy<1.11"]
all = [
"pulser>=0.12.0",
"amazon-braket-sdk",
@@ -74,15 +74,11 @@ dependencies = [
"pytest-cov",
"pytest-mypy",
"pytest-xdist",
"nbconvert",
"ipykernel",
"jupyter_contrib_nbextensions",
"pre-commit",
"black",
"isort",
"ruff",
"notebook<7.0",
"dill",
]
features = ["all"]

@@ -123,9 +119,6 @@ dependencies = [
"mkdocs-jupyter",
"mkdocs-exclude",
"markdown-exec",
"notebook<7",
"jupyter_contrib_nbextensions",
"dill",
]
features = ["pulser", "braket", "visualization"]

2 changes: 2 additions & 0 deletions qadence/ml_tools/config.py
@@ -38,6 +38,8 @@ class TrainConfig:
"""A boolean function which evaluates a given validation metric is satisfied"""
trainstop_criterion: Optional[Callable] = None
"""A boolean function which evaluates a given training stopping metric is satisfied"""
batch_size: int = 1
"""The batch_size to use when passing a list/tuple of torch.Tensors."""

def __post_init__(self) -> None:
if self.folder:
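For context, a minimal sketch of how the new `batch_size` field might be set; field names follow the snippets in this commit, while the folder path and values are purely illustrative:

```python
# Illustrative only: constructing a TrainConfig with the new batch_size field.
from pathlib import Path

from qadence.ml_tools import TrainConfig

config = TrainConfig(
    folder=Path("/tmp/qadence_run"),  # hypothetical output folder
    max_iter=100,
    checkpoint_every=100,
    write_every=100,
    batch_size=25,  # used when a list/tuple of torch.Tensors is passed to train_with_grad
)
```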
7 changes: 6 additions & 1 deletion qadence/ml_tools/data.py
@@ -3,7 +3,7 @@
from dataclasses import dataclass

import torch
from torch.utils.data import DataLoader
from torch.utils.data import DataLoader, TensorDataset


@dataclass
@@ -25,3 +25,8 @@ def __iter__(self) -> DictDataLoader:

def __next__(self) -> dict[str, torch.Tensor]:
return {key: next(it) for key, it in self.iters.items()}


def to_dataloader(x: torch.Tensor, y: torch.Tensor, batch_size: int = 1) -> DataLoader:
"""Convert two torch tensors x and y to a Dataloader."""
return DataLoader(TensorDataset(x, y), batch_size=batch_size)
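A quick sketch of how the new `to_dataloader` helper could be used, assuming it is importable from `qadence.ml_tools.data` as added above:

```python
# Sketch: wrap two tensors into batches with the new to_dataloader helper.
import torch

from qadence.ml_tools.data import to_dataloader

x = torch.linspace(0, 1, 100).reshape(-1, 1)
y = torch.sin(x)
loader = to_dataloader(x, y, batch_size=25)

for xb, yb in loader:
    # each iteration yields a (25, 1) batch of inputs and matching targets
    print(xb.shape, yb.shape)
    break
```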
57 changes: 56 additions & 1 deletion qadence/ml_tools/train_grad.py
@@ -3,6 +3,7 @@
from typing import Callable

from rich.progress import BarColumn, Progress, TaskProgressColumn, TextColumn, TimeRemainingColumn
from torch import Tensor
from torch.nn import Module
from torch.optim import Optimizer
from torch.utils.data import DataLoader
@@ -20,7 +21,7 @@

def train(
model: Module,
dataloader: DictDataLoader | DataLoader | None,
dataloader: DictDataLoader | DataLoader | list[Tensor] | tuple[Tensor, Tensor] | None,
optimizer: Optimizer,
config: TrainConfig,
loss_fn: Callable,
@@ -57,6 +58,54 @@ def train(
called every `config.write_every` iterations. The function must have
the signature `write_tensorboard(writer, loss, metrics, iteration)`
(see the example below).
Example:
```python exec="on" source="material-block"
from pathlib import Path
import torch
from itertools import count
from qadence.constructors import total_magnetization, hea, feature_map
from qadence import chain, Parameter, QuantumCircuit
from qadence.models import QNN
from qadence.ml_tools import train_with_grad, TrainConfig
n_qubits = 2
fm = feature_map(n_qubits)
ansatz = hea(n_qubits=n_qubits, depth=3)
observable = total_magnetization(n_qubits)
circuit = QuantumCircuit(n_qubits, fm, ansatz)
model = QNN(circuit, observable, backend="pyqtorch", diff_mode="ad")
batch_size = 1
input_values = {"phi": torch.rand(batch_size, requires_grad=True)}
pred = model(input_values)
    ## let's prepare the training routine
cnt = count()
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]:
next(cnt)
x, y = data[0], data[1]
out = model(x)
loss = criterion(out, y)
return loss, {}
tmp_path = Path("/tmp")
n_epochs = 5
config = TrainConfig(
folder=tmp_path,
max_iter=n_epochs,
checkpoint_every=100,
write_every=100,
batch_size=batch_size,
)
batch_size = 25
x = torch.linspace(0, 1, batch_size).reshape(-1, 1)
y = torch.sin(x)
train_with_grad(model, (x, y), optimizer, config, loss_fn=loss_fn)
```
"""

assert loss_fn is not None, "Provide a valid loss function"
@@ -79,6 +128,12 @@ def train(
TaskProgressColumn(),
TimeRemainingColumn(elapsed_when_finished=True),
)
if isinstance(dataloader, (list, tuple)):
from qadence.ml_tools.data import to_dataloader

assert len(dataloader) == 2, "Please provide exactly two torch tensors."
x, y = dataloader
dataloader = to_dataloader(x=x, y=y, batch_size=config.batch_size)
with progress:
dl_iter = iter(dataloader) if isinstance(dataloader, DictDataLoader) else None

2 changes: 2 additions & 0 deletions qadence/operations.py
@@ -1171,6 +1171,8 @@ def AnalogRot(
ConstantAnalogRotation
"""
q = _cast(QubitSupport, qubit_support)
if isinstance(duration, str):
duration = Parameter(duration)
alpha = duration * sympy.sqrt(omega**2 + delta**2) / 1000 # type: ignore [operator]

ps = ParamMap(alpha=alpha, duration=duration, omega=omega, delta=delta, phase=phase)
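A hedged sketch of what the new branch enables: a string `duration` is now promoted to a `Parameter` before entering the `alpha` expression below it. Keyword names follow the snippet above; the values and the parameter name `"t_rot"` are illustrative:

```python
# Illustrative only: AnalogRot with a numeric vs. a string (parameterized) duration.
import torch

from qadence import AnalogRot

rot_fixed = AnalogRot(duration=300, omega=5 * torch.pi, delta=0, phase=0)
# With this change, the string below should be wrapped as Parameter("t_rot")
# before it is used in alpha = duration * sqrt(omega**2 + delta**2) / 1000.
rot_param = AnalogRot(duration="t_rot", omega=5 * torch.pi, delta=0, phase=0)
```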
4 changes: 2 additions & 2 deletions qadence/states.py
@@ -310,7 +310,7 @@ def random_state(
n_qubits = 2
# The default is StateGeneratorType.HAARMEASUREFAST
# The default is StateGeneratorType.HAAR_MEASURE_FAST
state = random_state(n_qubits=n_qubits)
print(state)
@@ -322,7 +322,7 @@ def random_state(

if type == StateGeneratorType.HAAR_MEASURE_FAST:
state = concat(tuple(_rand_haar_fast(n_qubits) for _ in range(batch_size)), dim=0)
elif type == StateGeneratorType.HAAR_MEASURE_FAST:
elif type == StateGeneratorType.HAAR_MEASURE_SLOW:
state = concat(tuple(_rand_haar_slow(n_qubits) for _ in range(batch_size)), dim=0)
elif type == StateGeneratorType.RANDOM_ROTATIONS:
state = _run_state(_abstract_random_state(n_qubits, batch_size), backend) # type: ignore
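A brief sketch of the code path this fix makes reachable: the slow Haar-measure branch was previously shadowed by a duplicated comparison against `HAAR_MEASURE_FAST`. Names are taken from the hunk above, and it is assumed that `random_state` and `StateGeneratorType` are importable from `qadence.states` and that the generator is selected via the `type` argument:

```python
# Illustrative only: requesting each Haar-measure generator explicitly.
from qadence.states import StateGeneratorType, random_state

state_fast = random_state(n_qubits=2)  # default: StateGeneratorType.HAAR_MEASURE_FAST
state_slow = random_state(n_qubits=2, type=StateGeneratorType.HAAR_MEASURE_SLOW)
```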
15 changes: 6 additions & 9 deletions readthedocs.yml
@@ -1,15 +1,12 @@
version: 2

build:
os: "ubuntu-22.04"
tools:
python: "3.10"
apt_packages:
- graphviz
commands:
- pip install hatch
- hatch -v run docs:build
- mkdir _readthedocs/
- mv site _readthedocs/html
python: "mambaforge-22.9"

conda:
environment: docs/environment.yml

mkdocs:
configuration: mkdocs.yml
configuration: mkdocs.yml
31 changes: 1 addition & 30 deletions tests/backends/pulser_basic/test_entanglement.py
@@ -11,7 +11,7 @@
from qadence.backends.pulser import Device
from qadence.blocks import AbstractBlock, chain
from qadence.divergences import js_divergence
from qadence.operations import RY, AnalogRot, entangle, wait
from qadence.operations import RY, entangle
from qadence.register import Register


@@ -24,35 +24,6 @@
Register(2),
Counter({"00": 250, "11": 250}),
),
# Four qubits GHZ state
(
chain(
AnalogRot(duration=100, omega=5 * torch.pi, delta=0, phase=0),
wait(2300),
AnalogRot(duration=300, omega=5 * torch.pi, delta=0, phase=0),
),
Register.square(qubits_side=2),
Counter(
{
"1111": 145,
"1110": 15,
"1101": 15,
"1100": 15,
"1011": 15,
"1010": 15,
"1001": 15,
"1000": 15,
"0111": 15,
"0110": 15,
"0101": 15,
"0100": 15,
"0011": 15,
"0010": 15,
"0001": 15,
"0000": 145,
}
),
),
],
)
def test_entanglement(blocks: AbstractBlock, register: Register, goal: Counter) -> None:
57 changes: 0 additions & 57 deletions tests/backends/pulser_basic/test_quantum_pulser.py
@@ -1,28 +1,18 @@
from __future__ import annotations

from collections import Counter

import pytest
import torch
from metrics import JS_ACCEPTANCE

from qadence import (
RX,
RY,
AnalogRot,
BackendName,
FeatureParameter,
QuantumCircuit,
Register,
VariationalParameter,
backend_factory,
chain,
entangle,
kron,
total_magnetization,
)
from qadence.backends.pulser import Device
from qadence.divergences import js_divergence


@pytest.fixture
@@ -35,53 +25,6 @@ def batched_circuit() -> QuantumCircuit:
return QuantumCircuit(n_qubits, block)


@pytest.mark.parametrize(
"circuit,goal",
[
(
QuantumCircuit(
Register(2), chain(entangle(383, qubit_support=(0, 1)), RY(0, 3 * torch.pi / 2))
),
Counter({"00": 250, "11": 250}),
),
(
QuantumCircuit(
Register.square(qubits_side=2),
chain(
entangle(2488),
AnalogRot(duration=300, omega=5 * torch.pi, delta=0, phase=0),
),
),
Counter(
{
"1111": 145,
"1110": 15,
"1101": 15,
"1100": 15,
"1011": 15,
"1010": 15,
"1001": 15,
"1000": 15,
"0111": 15,
"0110": 15,
"0101": 15,
"0100": 15,
"0011": 15,
"0010": 15,
"0001": 15,
"0000": 145,
}
),
),
],
)
def test_pulser_sequence_sample(circuit: QuantumCircuit, goal: Counter) -> None:
config = {"device_type": Device.REALISTIC}
backend = backend_factory(backend=BackendName.PULSER, diff_mode=None, configuration=config)
sample = backend.sample(backend.circuit(circuit), {}, n_shots=500)[0]
assert js_divergence(sample, goal) < JS_ACCEPTANCE


def test_expectation_batched(batched_circuit: QuantumCircuit) -> None:
batch_size = 3
values = {"phi": torch.tensor([torch.pi / 5, torch.pi / 4, torch.pi / 3])}
33 changes: 33 additions & 0 deletions tests/ml_tools/test_train.py
@@ -158,3 +158,36 @@ def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]:
model, optimizer = train_with_grad(model, data, optimizer, config, loss_fn=loss_fn)
x = torch.rand(1)
assert torch.allclose(torch.sin(x), model(x), rtol=1e-1, atol=1e-1)


@pytest.mark.flaky(max_runs=10)
def test_train_tensor_tuple(tmp_path: Path, Basic: torch.nn.Module) -> None:
model = Basic
batch_size = 25
x = torch.linspace(0, 1, batch_size).reshape(-1, 1)
y = torch.sin(x)

cnt = count()
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)

def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]:
next(cnt)
x, y = data[0], data[1]
out = model(x)
loss = criterion(out, y)
return loss, {}

n_epochs = 100
config = TrainConfig(
folder=tmp_path,
max_iter=n_epochs,
checkpoint_every=100,
write_every=100,
batch_size=batch_size,
)
train_with_grad(model, (x, y), optimizer, config, loss_fn=loss_fn)
assert next(cnt) == n_epochs

x = torch.rand(5, 1)
assert torch.allclose(torch.sin(x), model(x), rtol=1e-1, atol=1e-1)
