[Feature] Single gap GPSR #213

Merged — 12 commits, Jul 4, 2024
5 changes: 4 additions & 1 deletion pyqtorch/api.py
@@ -8,6 +8,7 @@
 from pyqtorch.adjoint import AdjointExpectation
 from pyqtorch.analog import Observable
 from pyqtorch.circuit import QuantumCircuit
+from pyqtorch.gpsr import PSRExpectation
 from pyqtorch.utils import DiffMode, inner_prod

 logger = getLogger(__name__)
@@ -94,6 +95,8 @@ def expectation(
             circuit, observable, state, values.keys(), *values.values()
         )
     elif diff_mode == DiffMode.GPSR:
-        raise NotImplementedError("To be added.")
+        return PSRExpectation.apply(
+            circuit, observable, state, values.keys(), *values.values()
+        )
     else:
         logger.error(f"Requested diff_mode '{diff_mode}' not supported.")
86 changes: 86 additions & 0 deletions pyqtorch/gpsr.py
@@ -0,0 +1,86 @@
from __future__ import annotations

from logging import getLogger
from typing import Any, Tuple

import torch
from torch import Tensor, no_grad
from torch.autograd import Function

import pyqtorch as pyq
from pyqtorch.analog import Observable
from pyqtorch.circuit import QuantumCircuit
from pyqtorch.parametric import Parametric
from pyqtorch.utils import inner_prod, param_dict

logger = getLogger(__name__)


class PSRExpectation(Function):
    """Expectation value of an observable, differentiable via the generalized
    parameter-shift rule (GPSR), currently restricted to gate generators with
    a single spectral gap.

    For a parameter theta of a gate whose generator has a single nonzero
    eigenvalue gap Delta, the derivative of the expectation f(theta) is
    recovered from two shifted evaluations:

        df/dtheta = Delta * (f(theta + s) - f(theta - s)) / (4 * sin(Delta * s / 2))

    with shift s = pi/2 (see `backward`).
    """

    @staticmethod
    @no_grad()
    def forward(
        ctx: Any,
        circuit: QuantumCircuit,
        observable: Observable,
        state: Tensor,
        param_names: list[str],
        *param_values: Tensor,
    ) -> Tensor:
        # Stash everything the backward pass needs to re-evaluate the circuit.
        ctx.circuit = circuit
        ctx.observable = observable
        ctx.param_names = param_names
        ctx.state = state
        values = param_dict(param_names, param_values)
        ctx.out_state = circuit.run(state, values)
        ctx.projected_state = observable.run(ctx.out_state, values)
        ctx.save_for_backward(*param_values)
        return inner_prod(ctx.out_state, ctx.projected_state).real

    @staticmethod
    def backward(ctx: Any, grad_out: Tensor) -> Tuple[None, ...]:
        param_values = ctx.saved_tensors
        values = param_dict(ctx.param_names, param_values)
        grads_dict = {k: None for k in values.keys()}
        shift = torch.tensor(torch.pi) / 2.0

        for op in ctx.circuit.flatten():
            if isinstance(op, Parametric) and isinstance(op.param_name, str):
                # Spectral gap: the unique nonzero difference between
                # eigenvalues of the gate generator (2 for Pauli rotations).
                spectrum = torch.linalg.eigvals(op.pauli).reshape(-1, 1)
                spectral_gap = torch.unique(
                    torch.abs(torch.tril(spectrum - spectrum.T))
                )
                spectral_gap = spectral_gap[spectral_gap.nonzero()]
                assert (
                    len(spectral_gap) == 1
                ), "PSRExpectation only works on single_gap for now."

                if values[op.param_name].requires_grad:
                    # Evaluate the expectation at theta +/- pi/2 and combine
                    # the two values with the single-gap shift-rule formula.
                    with no_grad():
                        copied_values = values.copy()
                        copied_values[op.param_name] += shift
                        f_plus = pyq.expectation(
                            ctx.circuit, ctx.state, copied_values, ctx.observable
                        )
                        copied_values = values.copy()
                        copied_values[op.param_name] -= shift
                        f_min = pyq.expectation(
                            ctx.circuit, ctx.state, copied_values, ctx.observable
                        )
                    grad = (
                        spectral_gap
                        * (f_plus - f_min)
                        / (4 * torch.sin(spectral_gap * shift / 2))
                    )
                    grad *= grad_out
                    if grads_dict[op.param_name] is not None:
                        grads_dict[op.param_name] += grad
                    else:
                        grads_dict[op.param_name] = grad
            else:
                logger.error(f"PSRExpectation does not support operation: {type(op)}.")
        return (None, None, None, None, *grads_dict.values())
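
As a sanity check on the shift-rule formula above: a standard Pauli rotation has spectral gap 2, so with s = pi/2 the denominator 4*sin(Delta*s/2) equals 4 and the expression reduces to the textbook rule (f_plus - f_min) / 2. A self-contained sketch using plain torch, no pyqtorch — it relies only on the standard closed form <Z> = cos(theta) for RX(theta)|0>:

```python
import torch

def f(theta: torch.Tensor) -> torch.Tensor:
    # <Z> after applying RX(theta) to |0> is cos(theta).
    return torch.cos(theta)

theta = torch.tensor(0.7)
shift = torch.tensor(torch.pi) / 2.0
spectral_gap = torch.tensor(2.0)  # generator X has eigenvalues +-1: one gap of 2

grad = spectral_gap * (f(theta + shift) - f(theta - shift)) / (
    4 * torch.sin(spectral_gap * shift / 2)
)
assert torch.allclose(grad, -torch.sin(theta))  # matches the analytic derivative
```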
1 change: 1 addition & 0 deletions pyqtorch/utils.py
@@ -87,6 +87,7 @@ class DiffMode(StrEnum):
"""An implementation of "Efficient calculation of gradients
in classical simulations of variational quantum algorithms",
Jones & Gacon, 2020"""

GPSR = "gpsr"
"""To be added."""

Expand Down
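
Since `DiffMode` is a `StrEnum`, the mode can be passed as a member or reconstructed from its string value — a small illustration (standard Enum behavior, not specific to this PR):

```python
from pyqtorch.utils import DiffMode

assert DiffMode("gpsr") == DiffMode.GPSR  # a member is recoverable from its value
```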
44 changes: 44 additions & 0 deletions tests/test_circuit.py
@@ -310,3 +310,47 @@ def test_sample_run() -> None:
    assert torch.allclose(wf, product_state("1100"))
    assert torch.allclose(pyq.QuantumCircuit(4, [pyq.I(0)]).run("1100"), wf)
    assert "1100" in samples[0]


def test_all_diff() -> None:
    rx = pyq.RX(0, param_name="theta_0")
    rz = pyq.RZ(2, param_name="theta_1")
    cnot = pyq.CNOT(1, 2)
    ops = [rx, rz, cnot]
    n_qubits = 3
    circ = pyq.QuantumCircuit(n_qubits, ops)
    obs = pyq.QuantumCircuit(n_qubits, [pyq.Z(0)])

    theta_0_value = torch.pi / 2
    theta_1_value = torch.pi

    state = pyq.zero_state(n_qubits)

    theta_0 = torch.tensor([theta_0_value], requires_grad=True)
    theta_1 = torch.tensor([theta_1_value], requires_grad=True)

    values = {"theta_0": theta_0, "theta_1": theta_1}

    exp_ad = expectation(circ, state, values, obs, DiffMode.AD)
    exp_adjoint = expectation(circ, state, values, obs, DiffMode.ADJOINT)
    exp_gpsr = expectation(circ, state, values, obs, DiffMode.GPSR)

    grad_ad = torch.autograd.grad(
        exp_ad, tuple(values.values()), torch.ones_like(exp_ad)
    )
    grad_adjoint = torch.autograd.grad(
        exp_adjoint, tuple(values.values()), torch.ones_like(exp_adjoint)
    )
    grad_gpsr = torch.autograd.grad(
        exp_gpsr, tuple(values.values()), torch.ones_like(exp_gpsr)
    )

    assert len(grad_ad) == len(grad_adjoint) == len(grad_gpsr)

    # All three differentiation modes must agree on every parameter gradient.
    for i in range(len(grad_ad)):
        assert torch.allclose(grad_ad[i], grad_adjoint[i]) and torch.allclose(
            grad_ad[i], grad_gpsr[i]
        )