Higher order tests
dominikandreasseitz committed Nov 7, 2023
1 parent c2e78ae commit 10e63f0
Showing 3 changed files with 153 additions and 67 deletions.
131 changes: 67 additions & 64 deletions qadence/backends/adjoint.py
@@ -7,16 +7,16 @@
 from pyqtorch.parametric import Parametric as PyQParametric
 from pyqtorch.primitive import Primitive as PyQPrimitive
 from pyqtorch.utils import overlap, param_dict
-from torch import Tensor, no_grad, tensor
+from torch import Tensor, nn, no_grad, tensor
 from torch.autograd import Function
 
 from qadence.backends.pyqtorch.convert_ops import PyQHamiltonianEvolution, ScalePyQOperation
 from qadence.blocks.abstract import AbstractBlock
 
 
 class AdjointExpectation(Function):
-    @no_grad()
     @staticmethod
+    @no_grad()
     def forward(
         ctx: Any,
         circuit: PyQCircuit,
@@ -36,78 +36,81 @@ def forward(
         ctx.save_for_backward(*param_values)
         return overlap(ctx.out_state, ctx.projected_state)
 
-    @no_grad()
     @staticmethod
+    @no_grad()
     def backward(ctx: Any, grad_out: Tensor) -> tuple:
-        def _apply_adjoint(ctx: Any, circuit: PyQCircuit, grad_out: Tensor = tensor([1])) -> Any:
-            param_values = ctx.saved_tensors
-            values = param_dict(ctx.param_names, param_values)
+        param_values = ctx.saved_tensors
+        values = param_dict(ctx.param_names, param_values)
+
+        def _apply_adjoint(ctx: Any, op: nn.Module, grad_out: Tensor = tensor([1.0])) -> list:
             grads: list = []
-            for op in circuit.reverse():
-                if isinstance(op, (PyQHamiltonianEvolution)):
-                    generator = op.block.generator
-                    time_param = values[op.param_names[0]]
-                    ctx.out_state = apply_operator(
-                        ctx.out_state, op.dagger(values), op.qubit_support
-                    )
-                    if (
-                        isinstance(generator, AbstractBlock)
-                        and generator.is_parametric
-                        and values[op.param_names[1]].requires_grad
-                    ):
-                        mu = apply_operator(
-                            ctx.out_state, op.jacobian_generator(values), op.qubit_support
-                        )
-                        grads = [grad_out * 2 * overlap(ctx.projected_state, mu)] + grads
-                    elif time_param.requires_grad:
-                        mu = apply_operator(
-                            ctx.out_state, op.jacobian_time(values), op.qubit_support
-                        )
-                        grads = [grad_out * 2 * overlap(ctx.projected_state, mu)] + grads
-                    ctx.projected_state = apply_operator(
-                        ctx.projected_state, op.dagger(values), op.qubit_support
-                    )
-                elif isinstance(op, ScalePyQOperation):
-                    ctx.out_state = apply_operator(
-                        ctx.out_state, op.dagger(values), op.qubit_support
-                    )
-                    scaled_pyq_op = op.operations[0]
-                    if (
-                        isinstance(scaled_pyq_op, PyQParametric)
-                        and values[scaled_pyq_op.param_name].requires_grad
-                    ):
-                        mu = apply_operator(
-                            ctx.out_state,
-                            scaled_pyq_op.jacobian(values),
-                            scaled_pyq_op.qubit_support,
-                        )
-                        grads = [grad_out * 2 * overlap(ctx.projected_state, mu)] + grads
-
-                    if values[op.param_name].requires_grad:
-                        grads += [grad_out * 2 * -values[op.param_name]]
-                    ctx.projected_state = apply_operator(
-                        ctx.projected_state, op.dagger(values), op.qubit_support
-                    )
-                elif isinstance(op, PyQCircuit):
-                    grads += [grad_out * g for g in _apply_adjoint(ctx, op)]
-                elif isinstance(op, (PyQPrimitive)):
-                    ctx.out_state = apply_operator(
-                        ctx.out_state, op.dagger(values), op.qubit_support
-                    )
-                    if isinstance(op, (PyQParametric)) and values[op.param_name].requires_grad:
-                        mu = apply_operator(ctx.out_state, op.jacobian(values), op.qubit_support)
-                        grads = [grad_out * 2 * overlap(ctx.projected_state, mu)] + grads
-                    ctx.projected_state = apply_operator(
-                        ctx.projected_state, op.dagger(values), op.qubit_support
-                    )
-                else:
-                    raise TypeError(
-                        f"AdjointExpectation does not support a backward pass for type {type(op)}."
-                    )
+            if isinstance(op, (PyQHamiltonianEvolution)):
+                generator = op.block.generator
+                time_param = values[op.param_names[0]]
+
+                ctx.out_state = apply_operator(ctx.out_state, op.dagger(values), op.qubit_support)
+                if (
+                    isinstance(generator, AbstractBlock)
+                    and generator.is_parametric
+                    and values[op.param_names[1]].requires_grad
+                ):
+                    mu = apply_operator(
+                        ctx.out_state, op.jacobian_generator(values), op.qubit_support
+                    )
+                    grads.append(grad_out * 2 * overlap(ctx.projected_state, mu))
+                elif time_param.requires_grad:
+                    mu = apply_operator(ctx.out_state, op.jacobian_time(values), op.qubit_support)
+                    grads.append(grad_out * 2 * overlap(ctx.projected_state, mu))
+                ctx.projected_state = apply_operator(
+                    ctx.projected_state, op.dagger(values), op.qubit_support
+                )
+            elif isinstance(op, ScalePyQOperation):
+                ctx.out_state = apply_operator(ctx.out_state, op.dagger(values), op.qubit_support)
+                scaled_pyq_op = op.operations[0]
+                if (
+                    isinstance(scaled_pyq_op, PyQParametric)
+                    and values[scaled_pyq_op.param_name].requires_grad
+                ):
+                    mu = apply_operator(
+                        ctx.out_state,
+                        scaled_pyq_op.jacobian(values),
+                        scaled_pyq_op.qubit_support,
+                    )
+                    grads.append(grad_out * 2 * overlap(ctx.projected_state, mu))
+
+                if values[op.param_name].requires_grad:
+                    grads.append(grad_out * 2 * -values[op.param_name])
+                ctx.projected_state = apply_operator(
+                    ctx.projected_state, op.dagger(values), op.qubit_support
+                )
+            elif isinstance(op, PyQCircuit):
+                grads = [
+                    grad_out * g for sub_op in op.reverse() for g in _apply_adjoint(ctx, sub_op)
+                ]
+            elif isinstance(op, (PyQPrimitive)):
+                ctx.out_state = apply_operator(ctx.out_state, op.dagger(values), op.qubit_support)
+                if isinstance(op, (PyQParametric)) and values[op.param_name].requires_grad:
+                    mu = apply_operator(
+                        ctx.out_state,
+                        op.jacobian(values),
+                        op.qubit_support,
+                    )
+                    grads.append(grad_out * 2 * overlap(ctx.projected_state, mu))
+                ctx.projected_state = apply_operator(
+                    ctx.projected_state, op.dagger(values), op.qubit_support
+                )
+            else:
+                raise TypeError(
+                    f"AdjointExpectation does not support a backward pass for type {type(op)}."
+                )
 
             return grads
 
-        grads = _apply_adjoint(ctx, ctx.circuit, grad_out=grad_out)
+        grads = list(
+            reversed(
+                [grad_out * g for op in ctx.circuit.reverse() for g in _apply_adjoint(ctx, op)]
+            )
+        )
         num_grads = len(grads)
         num_params = len(ctx.saved_tensors)
         diff = num_params - num_grads
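
The per-operation update in this backward pass, grads.append(grad_out * 2 * overlap(ctx.projected_state, mu)), mirrors the adjoint-method gradient rule dE/dtheta = 2 * Re<lambda| dU/dtheta |psi>, where |psi> is the state propagated through the circuit and |lambda> is the observable applied to it. A minimal self-contained sketch of that rule in plain torch (hand-written RX matrix and local names only; not the backend implementation):

import math

import torch

theta = 0.7
c, s = math.cos(theta / 2), math.sin(theta / 2)
rx = torch.tensor([[c, -1j * s], [-1j * s, c]], dtype=torch.complex128)  # RX(theta)
drx = 0.5 * torch.tensor([[-s, -1j * c], [-1j * c, -s]], dtype=torch.complex128)  # dRX/dtheta
z = torch.tensor([[1.0, 0.0], [0.0, -1.0]], dtype=torch.complex128)  # observable Z

psi0 = torch.tensor([1.0, 0.0], dtype=torch.complex128)
out_state = rx @ psi0  # |psi(theta)>
projected_state = z @ out_state  # the "projected state": observable applied to |psi>
mu = drx @ psi0  # the state pushed through the gate Jacobian

# E(theta) = <0| RX^dag Z RX |0> = cos(theta), so dE/dtheta = -sin(theta)
grad = 2 * torch.real(torch.vdot(projected_state, mu))
assert torch.isclose(grad, torch.tensor(-math.sin(theta), dtype=torch.float64))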
45 changes: 45 additions & 0 deletions qadence/backends/utils.py
@@ -6,6 +6,9 @@
 
 import numpy as np
 import torch
+from pyqtorch.apply import apply_operator
+from pyqtorch.parametric import Parametric as PyQParametric
+from pyqtorch.utils import overlap
 from torch import Tensor
 
 from qadence.utils import Endianness, int_to_basis
@@ -94,10 +97,52 @@ def to_list_of_dicts(param_values: dict[str, Tensor]) -> list[dict[str, float]]:
     return [{k: v[i] for k, v in batched_values.items()} for i in range(max_batch_size)]
 
 
+def finitediff_sampling(
+    f: Callable, x: torch.Tensor, eps: float = FINITE_DIFF_EPS, num_samples: int = 10
+) -> torch.Tensor:
+    def _finitediff(val: torch.Tensor) -> torch.Tensor:
+        return (f(x + val) - f(x - val)) / (2 * val)  # type: ignore
+
+    with torch.no_grad():
+        return torch.mean(
+            torch.cat([_finitediff(val) for val in torch.rand(1) for _ in range(num_samples)])
+        )
+
+
 def finitediff(f: Callable, x: torch.Tensor, eps: float = FINITE_DIFF_EPS) -> torch.Tensor:
     return (f(x + eps) - f(x - eps)) / (2 * eps)  # type: ignore
 
 
+def dydx(
+    jacobian: torch.Tensor,
+    qubit_support: tuple,
+    out_state: torch.Tensor,
+    projected_state: torch.Tensor,
+) -> torch.Tensor:
+    return 2 * overlap(
+        projected_state,
+        apply_operator(
+            state=out_state,
+            operator=jacobian,
+            qubits=qubit_support,
+        ),
+    )
+
+
+def dydxx(
+    op: PyQParametric,
+    values: dict[str, torch.Tensor],
+    out_state: torch.Tensor,
+    projected_state: torch.Tensor,
+) -> torch.Tensor:
+    return 2 * finitediff_sampling(
+        lambda val: dydx(
+            op.jacobian({op.param_name: val}), op.qubit_support, out_state, projected_state
+        ),
+        values[op.param_name],
+    )
+
+
 def pyqify(state: torch.Tensor, n_qubits: int = None) -> torch.Tensor:
     if n_qubits is None:
         n_qubits = int(log2(state.shape[1]))
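
Reading of the two new helpers: dydx evaluates 2 * overlap(projected_state, apply_operator(out_state, jacobian, qubit_support)), the same per-gate quantity the adjoint backward pass accumulates, and dydxx estimates a second derivative by applying finitediff_sampling to dydx over the gate parameter. The sketch below shows only that composition pattern (a central finite difference of an analytically known first derivative) on an ordinary scalar function; every name in it is local to the sketch, not part of the qadence API:

import torch

def central_diff(f, x, eps=1e-3):
    # same central-difference formula as the finitediff helper above
    return (f(x + eps) - f(x - eps)) / (2 * eps)

f = torch.sin  # stand-in for an expectation value E(x)
dfdx = torch.cos  # its analytic first derivative, playing the role of dydx
x = torch.tensor(0.3, dtype=torch.float64)

d1 = central_diff(f, x)  # first-derivative estimate, close to cos(x)
d2 = central_diff(dfdx, x)  # second derivative as a finite difference of the analytic first
assert torch.allclose(d1, torch.cos(x), atol=1e-3)
assert torch.allclose(d2, -torch.sin(x), atol=1e-3)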
44 changes: 41 additions & 3 deletions tests/backends/test_adjoint.py
@@ -5,6 +5,7 @@
 from metrics import ADJOINT_ACCEPTANCE
 
 from qadence.backends.api import backend_factory
+from qadence.backends.utils import dydx, dydxx
 from qadence.blocks import AbstractBlock, chain
 from qadence.circuit import QuantumCircuit
 from qadence.constructors import hea
@@ -20,7 +21,7 @@ def test_pyq_differentiation(diff_mode: str) -> None:
     batch_size = 1
     n_qubits = 2
     observable: list[AbstractBlock] = [Z(0)]
-    circ = QuantumCircuit(n_qubits, chain(RX(0, "x"), CPHASE(0, 1, "y")))
+    circ = QuantumCircuit(n_qubits, chain(RX(0, 3 * "x"), CPHASE(0, 1, "y")))
 
     bknd = backend_factory(backend="pyqtorch", diff_mode=diff_mode)
     pyqtorch_circ, pyqtorch_obs, embeddings_fn, params = bknd.convert(circ, observable)
@@ -33,8 +34,12 @@ def func(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
         all_params = embeddings_fn(params, inputs)
         return bknd.expectation(pyqtorch_circ, pyqtorch_obs, all_params)
 
-    assert torch.autograd.gradcheck(lambda x: func(x, inputs_y), inputs_x)
-    assert torch.autograd.gradcheck(lambda y: func(inputs_x, y), inputs_y)
+    assert torch.autograd.gradcheck(
+        lambda x: func(x, inputs_y), inputs_x, nondet_tol=ADJOINT_ACCEPTANCE
+    )
+    assert torch.autograd.gradcheck(
+        lambda y: func(inputs_x, y), inputs_y, nondet_tol=ADJOINT_ACCEPTANCE
+    )
 
 
 @pytest.mark.parametrize("diff_mode", [DiffMode.ADJOINT])
@@ -122,3 +127,36 @@ def func(theta: torch.Tensor) -> torch.Tensor:
         return backend.expectation(pyqtorch_circ, pyqtorch_obs, all_params)
 
     assert torch.autograd.gradcheck(func, theta, nondet_tol=ADJOINT_ACCEPTANCE)
+
+
+@pytest.mark.flaky
+def test_higher_order() -> None:
+    batch_size = 1
+    n_qubits = 1
+    observable: list[AbstractBlock] = [Z(0)]
+    circ = QuantumCircuit(n_qubits, chain(RX(0, "x")))
+
+    bknd = backend_factory(backend="pyqtorch", diff_mode="ad")
+    pyqtorch_circ, pyqtorch_obs, embeddings_fn, params = bknd.convert(circ, observable)
+
+    inputs_x = torch.rand(batch_size, requires_grad=True)
+
+    inputs = {"x": inputs_x}
+    all_params = embeddings_fn(params, inputs)
+    out_state = pyqtorch_circ.native.run(values=all_params)
+    projected_state = pyqtorch_obs[0].native.run(out_state, all_params)
+    op = pyqtorch_circ.native.operations[0].operations[0]
+    with torch.no_grad():
+        dydx_res = dydx(op.jacobian({"x": inputs_x}), op.qubit_support, out_state, projected_state)
+        dydxx_res = dydxx(op, {"x": inputs_x}, out_state, projected_state)
+
+    def func(x: torch.Tensor) -> torch.Tensor:
+        inputs = {"x": x}
+        all_params = embeddings_fn(params, inputs)
+        return bknd.expectation(pyqtorch_circ, pyqtorch_obs, all_params)
+
+    exp = func(inputs_x)
+    grad = torch.autograd.grad(exp, inputs_x, torch.ones_like(exp), create_graph=True)[0]
+    gradgrad = torch.autograd.grad(grad, inputs_x, torch.ones_like(grad), retain_graph=True)[0]
+    assert torch.allclose(dydx_res, grad, atol=ADJOINT_ACCEPTANCE)
+    assert torch.allclose(dydxx_res, gradgrad, atol=ADJOINT_ACCEPTANCE)
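
The reference values in test_higher_order come from running torch autograd twice: create_graph=True on the first torch.autograd.grad call keeps the graph of the gradient so it can be differentiated again. A standalone sketch of that pattern on a plain function (illustrative names only, no backend involved):

import torch

x = torch.tensor(0.5, requires_grad=True)
y = torch.cos(x)  # stand-in for the expectation value func(x)

(dy,) = torch.autograd.grad(y, x, create_graph=True)  # first derivative, graph retained
(d2y,) = torch.autograd.grad(dy, x)  # differentiate the derivative itself

assert torch.allclose(dy, -torch.sin(x))
assert torch.allclose(d2y, -torch.cos(x))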
