From 041290cf518247d4fdf5ab332560b57fa2f9d88b Mon Sep 17 00:00:00 2001 From: Vytautas Abramavicius Date: Wed, 15 Nov 2023 10:46:08 +0200 Subject: [PATCH 1/7] working version of semi-local addressing --- qadence/analog/addressing.py | 138 +++++++++++++++++++++++++++++ qadence/analog/interaction.py | 76 +++++++++++++--- qadence/analog/utils.py | 20 +++++ qadence/backends/pulser/backend.py | 7 +- qadence/backends/pulser/config.py | 4 + qadence/backends/pulser/pulses.py | 44 +++++++++ qadence/parameters.py | 10 ++- tests/analog/test_patterns.py | 125 ++++++++++++++++++++++++++ 8 files changed, 406 insertions(+), 18 deletions(-) create mode 100644 qadence/analog/addressing.py create mode 100644 tests/analog/test_patterns.py diff --git a/qadence/analog/addressing.py b/qadence/analog/addressing.py new file mode 100644 index 000000000..ab68a38a4 --- /dev/null +++ b/qadence/analog/addressing.py @@ -0,0 +1,138 @@ +from __future__ import annotations + +from dataclasses import dataclass +from warnings import warn + +import sympy +import torch +from numpy import pi + +from qadence.parameters import Parameter, evaluate +from qadence.types import StrEnum + +DEFAULT_MAX_AMPLITUDE = 2 * pi * 3 +DEFAULT_MAX_DETUNING = 2 * pi * 20 + + +class WeightConstraint(StrEnum): + """Supported types of constraints for addressing weights.""" + + NORMALIZE = "normalize" + """Normalize weights so that they sum up to 1.""" + + RESTRICT = "restrict" + """Restrict weight values to interval [0, 1].""" + + +def sigmoid(x: torch.Tensor, a: float, b: float) -> sympy.Expr: + return 1.0 / (1.0 + sympy.exp(-a * (x + b))) + + +@dataclass +class AddressingPattern: + # number of qubits + n_qubits: int + + # list of weights for fixed amplitude pattern that cannot be changed during the execution + weights_amp: dict[int, float | torch.Tensor | Parameter] + + # list of weights for fixed detuning pattern that cannot be changed during the execution + weights_det: dict[int, float | torch.Tensor | Parameter] + + # maximum amplitude can also be chosen as a variational parameter if needed + max_amp: float | torch.Tensor | Parameter = DEFAULT_MAX_AMPLITUDE + + # maximum detuning can also be chosen as a variational parameter if needed + max_det: float | torch.Tensor | Parameter = DEFAULT_MAX_DETUNING + + # weight constraint + weight_constraint: WeightConstraint = WeightConstraint.NORMALIZE + + def _normalize_weights(self) -> None: + self.weights_amp = { + k: Parameter(v) if not isinstance(v, Parameter) else abs(v) + for k, v in self.weights_amp.items() + } + sum_weights_amp = sum(list(self.weights_amp.values())) + self.weights_amp = {k: v / sum_weights_amp for k, v in self.weights_amp.items()} + + self.weights_det = { + k: Parameter(v) if not isinstance(v, Parameter) else abs(v) + for k, v in self.weights_det.items() + } + sum_weights_det = sum(list(self.weights_det.values())) + self.weights_det = {k: v / sum_weights_det for k, v in self.weights_det.items()} + + def _restrict_weights(self) -> None: + self.weights_amp = { + k: v * (sigmoid(v, 20, 0.0) - sigmoid(v, 20.0, -1.0)) + for k, v in self.weights_amp.items() + } + self.weights_det = { + k: v * (sigmoid(v, 20.0, 0.0) - sigmoid(v, 20.0, -1.0)) + for k, v in self.weights_det.items() + } + + def _restrict_max_vals(self) -> None: + self.max_amp = self.max_amp * ( + sympy.Heaviside(self.max_amp) - sympy.Heaviside(self.max_amp - DEFAULT_MAX_AMPLITUDE) + ) + self.max_det = self.max_det * ( + sympy.Heaviside(self.max_det) - sympy.Heaviside(self.max_det - DEFAULT_MAX_DETUNING) + ) + + def 
__post_init__(self) -> None: + # validate weights + if all([not isinstance(v, Parameter) for v in self.weights_amp.values()]): + if not torch.isclose( + torch.tensor(list(self.weights_amp.values())).sum(), + torch.tensor(1.0), + atol=1e-3, + ): + raise ValueError("Amplitude addressing pattern weights must sum to 1.0") + if all([not isinstance(v, Parameter) for v in self.weights_det.values()]): + if not torch.isclose( + torch.tensor(list(self.weights_det.values())).sum(), + torch.tensor(1.0), + atol=1e-3, + ): + raise ValueError("Detuning addressing pattern weights must sum to 1.0") + + # validate detuning value + if not isinstance(self.max_amp, Parameter): + if self.max_amp > DEFAULT_MAX_AMPLITUDE: + warn("Maximum absolute value of amplitude is exceeded") + if not isinstance(self.max_det, Parameter): + if self.max_det > DEFAULT_MAX_DETUNING: + warn("Maximum absolute value of detuning is exceeded") + + # augment weight dicts if needed + self.weights_amp = { + i: Parameter(0.0) if i not in self.weights_amp else self.weights_amp[i] + for i in range(self.n_qubits) + } + self.weights_det = { + i: Parameter(0.0) if i not in self.weights_det else self.weights_det[i] + for i in range(self.n_qubits) + } + + # apply weight constraint + if self.weight_constraint == WeightConstraint.NORMALIZE: + self._normalize_weights() + elif self.weight_constraint == WeightConstraint.RESTRICT: + self._restrict_weights() + else: + raise ValueError("Weight constraint type not found.") + + # restrict max amplitude and detuning to strict interval + self._restrict_max_vals() + + # validate number of qubits in mask + if max(list(self.weights_amp.keys())) >= self.n_qubits: + raise ValueError("Amplitude weight specified for non-existing qubit") + if max(list(self.weights_det.keys())) >= self.n_qubits: + raise ValueError("Detuning weight specified for non-existing qubit") + + def evaluate(self, weights: dict, values: dict) -> dict: + # evaluate weight expressions with actual values + return {k: evaluate(v, values, as_torch=True).flatten() for k, v in weights.items()} # type: ignore [union-attr] diff --git a/qadence/analog/interaction.py b/qadence/analog/interaction.py index 16e5cc9fb..a36f2b84b 100644 --- a/qadence/analog/interaction.py +++ b/qadence/analog/interaction.py @@ -7,7 +7,8 @@ import torch -from qadence.analog.utils import ising_interaction, rot_generator, xy_interaction +from qadence.analog.addressing import AddressingPattern +from qadence.analog.utils import add_pattern, ising_interaction, rot_generator, xy_interaction from qadence.blocks.abstract import AbstractBlock from qadence.blocks.analog import ( AnalogBlock, @@ -49,6 +50,7 @@ def add_interaction( x: Register | QuantumCircuit | AbstractBlock, *args: Any, interaction: Interaction | Callable = Interaction.NN, + pattern: AddressingPattern | None = None, ) -> QuantumCircuit | AbstractBlock: """Turns blocks or circuits into (a chain of) `HamEvo` blocks. @@ -68,6 +70,7 @@ def add_interaction( combinations are accepted. interaction: Type of interaction that is added. Can also be a function that accepts a register and a list of edges that define which qubits interact (see the examples). 
+ pattern: pattern for emulating semi-local addressing Examples: ```python exec="on" source="material-block" result="json" @@ -128,6 +131,7 @@ def _( register: Register, block: AbstractBlock, interaction: Union[Interaction, Callable] = Interaction.NN, + pattern: Union[AddressingPattern, None] = None, ) -> AbstractBlock: try: fn = interaction if callable(interaction) else INTERACTIONS[Interaction(interaction)] @@ -135,64 +139,108 @@ def _( raise KeyError( "Function `add_interaction` only supports NN and XY, or a custom callable function." ) - return _add_interaction(block, register, fn) # type: ignore[arg-type] + return _add_interaction(block, register, fn, pattern) # type: ignore[arg-type] @singledispatch -def _add_interaction(b: AbstractBlock, r: Register, interaction: Callable) -> AbstractBlock: +def _add_interaction( + b: AbstractBlock, + r: Register, + interaction: Callable, + pattern: Union[AddressingPattern, None], +) -> AbstractBlock: raise NotImplementedError(f"Cannot emulate {type(b)}") @_add_interaction.register -def _(b: CompositeBlock, r: Register, i: Callable) -> AbstractBlock: - return _construct(type(b), tuple(map(lambda b: _add_interaction(b, r, i), b.blocks))) +def _( + b: CompositeBlock, + r: Register, + i: Callable, + pattern: Union[AddressingPattern, None], +) -> AbstractBlock: + return _construct(type(b), tuple(map(lambda b: _add_interaction(b, r, i, pattern), b.blocks))) @_add_interaction.register -def _(block: ScaleBlock, register: Register, interaction: Callable) -> AbstractBlock: +def _( + block: ScaleBlock, + register: Register, + interaction: Callable, + pattern: Union[AddressingPattern, None], +) -> AbstractBlock: if isinstance(block.block, AnalogBlock): raise NotImplementedError("Scaling emulated analog blocks is not implemented.") return block @_add_interaction.register -def _(block: PrimitiveBlock, register: Register, interaction: Callable) -> AbstractBlock: +def _( + block: PrimitiveBlock, + register: Register, + interaction: Callable, + pattern: Union[AddressingPattern, None], +) -> AbstractBlock: return block @_add_interaction.register -def _(block: WaitBlock, register: Register, interaction: Callable) -> AbstractBlock: +def _( + block: WaitBlock, + register: Register, + interaction: Callable, + pattern: Union[AddressingPattern, None], +) -> AbstractBlock: duration = block.parameters.duration support = tuple(range(register.n_qubits)) assert support == block.qubit_support if not block.qubit_support.is_global else True pairs = list(filter(lambda x: x[0] < x[1], product(support, support))) - return HamEvo(interaction(register, pairs), duration / 1000) if len(pairs) else I(0) + p_terms = add_pattern(register, pattern) + generator = interaction(register, pairs) + p_terms + + return HamEvo(generator, duration / 1000) if len(pairs) else I(0) @_add_interaction.register -def _(block: ConstantAnalogRotation, register: Register, interaction: Callable) -> AbstractBlock: +def _( + block: ConstantAnalogRotation, + register: Register, + interaction: Callable, + pattern: Union[AddressingPattern, None], +) -> AbstractBlock: # convert "global" to indexed qubit suppport so that we can re-use `kron` dispatched function b = deepcopy(block) b.qubit_support = QubitSupport(*range(register.n_qubits)) - return _add_interaction(kron(b), register, interaction) + return _add_interaction(kron(b), register, interaction, pattern) @_add_interaction.register -def _(block: AnalogKron, register: Register, interaction: Callable) -> AbstractBlock: +def _( + block: AnalogKron, + register: 
Register, + interaction: Callable, + pattern: Union[AddressingPattern, None], +) -> AbstractBlock: from qadence import block_to_tensor w_block = wait(duration=block.duration, qubit_support=block.qubit_support) i_terms = add_interaction(register, w_block, interaction=interaction) + p_terms = add_pattern(register, pattern) generator = add(rot_generator(b) for b in block.blocks if isinstance(b, ConstantAnalogRotation)) - generator = generator if i_terms == I(0) else generator + i_terms.generator # type: ignore[attr-defined] # noqa: E501 + generator = generator if i_terms == I(0) else generator + i_terms.generator + p_terms # type: ignore[attr-defined] # noqa: E501 norm = torch.norm(block_to_tensor(generator)).item() return HamEvo(generator / norm, norm * block.duration / 1000) @_add_interaction.register -def _(block: AnalogChain, register: Register, interaction: Callable) -> AbstractBlock: +def _( + block: AnalogChain, + register: Register, + interaction: Callable, + pattern: Union[AddressingPattern, None], +) -> AbstractBlock: return chain(add_interaction(register, b, interaction=interaction) for b in block.blocks) diff --git a/qadence/analog/utils.py b/qadence/analog/utils.py index 7247fdcfb..1587e2c52 100644 --- a/qadence/analog/utils.py +++ b/qadence/analog/utils.py @@ -1,9 +1,11 @@ from __future__ import annotations from math import dist as euclidean_distance +from typing import Union from sympy import cos, sin +from qadence.analog.addressing import AddressingPattern from qadence.blocks.abstract import AbstractBlock from qadence.blocks.analog import ( ConstantAnalogRotation, @@ -130,3 +132,21 @@ def rot_generator(block: ConstantAnalogRotation) -> AbstractBlock: x_terms = (omega / 2) * add(cos(phase) * X(i) - sin(phase) * Y(i) for i in support) z_terms = delta * add(N(i) for i in support) return x_terms - z_terms # type: ignore[no-any-return] + + +def add_pattern(register: Register, pattern: Union[AddressingPattern, None]) -> AbstractBlock: + support = tuple(range(register.n_qubits)) + if pattern is not None: + max_amp = pattern.max_amp + max_det = pattern.max_det + weights_amp = pattern.weights_amp + weights_det = pattern.weights_det + else: + max_amp = 0.0 + max_det = 0.0 + weights_amp = {i: 0.0 for i in support} + weights_det = {i: 0.0 for i in support} + + p_drive_terms = (1 / 2) * max_amp * add(X(i) * weights_amp[i] for i in support) + p_detuning_terms = -max_det * add(N(i) * weights_det[i] for i in support) + return p_drive_terms + p_detuning_terms # type: ignore[no-any-return] diff --git a/qadence/backends/pulser/backend.py b/qadence/backends/pulser/backend.py index 33b53f2ed..e34a89d48 100644 --- a/qadence/backends/pulser/backend.py +++ b/qadence/backends/pulser/backend.py @@ -35,7 +35,7 @@ from .config import Configuration from .convert_ops import convert_observable from .devices import Device, IdealDevice, RealisticDevice -from .pulses import add_pulses +from .pulses import add_addressing_pattern, add_pulses logger = get_logger(__file__) @@ -91,7 +91,6 @@ def make_sequence(circ: QuantumCircuit, config: Configuration) -> Sequence: sequence.declare_channel(LOCAL_CHANNEL, "rydberg_local", initial_target=0) add_pulses(sequence, circ.block, config, circ.register) - sequence.measure() return sequence @@ -213,6 +212,8 @@ def run( for i, param_values_el in enumerate(vals): sequence = self.assign_parameters(circuit, param_values_el) + add_addressing_pattern(sequence, self.config) + sequence.measure() sim_result = simulate_sequence(sequence, self.config, state, n_shots=None) wf = ( 
sim_result.get_final_state( # type:ignore [union-attr] @@ -281,6 +282,8 @@ def sample( samples = [] for param_values_el in vals: sequence = self.assign_parameters(circuit, param_values_el) + add_addressing_pattern(sequence, self.config) + sequence.measure() sample = simulate_sequence(sequence, self.config, state, n_shots=n_shots) samples.append(sample) if endianness != self.native_endianness: diff --git a/qadence/backends/pulser/config.py b/qadence/backends/pulser/config.py index fbf5c54d5..7b56efa06 100644 --- a/qadence/backends/pulser/config.py +++ b/qadence/backends/pulser/config.py @@ -7,6 +7,7 @@ from pasqal_cloud.device import EmulatorType from pulser_simulation.simconfig import SimConfig +from qadence.analog.addressing import AddressingPattern from qadence.backend import BackendConfiguration from qadence.blocks.analog import Interaction @@ -85,6 +86,9 @@ class Configuration(BackendConfiguration): # configuration for cloud simulations cloud_configuration: Optional[CloudConfiguration] = None + addressing_pattern: AddressingPattern | None = None + """Semi-local addressing pattern.""" + def __post_init__(self) -> None: super().__post_init__() if self.sim_config is not None and not isinstance(self.sim_config, SimConfig): diff --git a/qadence/backends/pulser/pulses.py b/qadence/backends/pulser/pulses.py index 00119060e..a5ba708cf 100644 --- a/qadence/backends/pulser/pulses.py +++ b/qadence/backends/pulser/pulses.py @@ -42,6 +42,50 @@ ] +def add_addressing_pattern( + sequence: Sequence, + config: Configuration, +) -> None: + total_duration = sequence.get_duration() + n_qubits = len(sequence.register.qubits) + + support = tuple(range(n_qubits)) + if config.addressing_pattern is not None: + max_amp = config.addressing_pattern.max_amp + max_det = config.addressing_pattern.max_det + weights_amp = config.addressing_pattern.weights_amp + weights_det = config.addressing_pattern.weights_det + else: + max_amp = 0.0 + max_det = 0.0 + weights_amp = {i: 0.0 for i in support} + weights_det = {i: 0.0 for i in support} + + for i in support: + # declare separate local channel for each qubit + sequence.declare_channel(f"ch_q{i}", "rydberg_local", initial_target=0) + + # add amplitude and detuning patterns + for i in support: + w_amp = ( + evaluate(weights_amp[i]) + if weights_amp[i].is_number # type: ignore [union-attr] + else sequence.declare_variable(f"w-amp-{i}") + ) + w_det = ( + evaluate(weights_det[i]) + if weights_det[i].is_number # type: ignore [union-attr] + else sequence.declare_variable(f"w-det-{i}") + ) + omega = max_amp * w_amp + detuning = -max_det * w_det + pulse = Pulse.ConstantPulse( + duration=total_duration, amplitude=omega, detuning=detuning, phase=0 + ) + sequence.target(i, f"ch_q{i}") + sequence.add(pulse, f"ch_q{i}", protocol="no-delay") + + def add_pulses( sequence: Sequence, block: AbstractBlock, diff --git a/qadence/parameters.py b/qadence/parameters.py index 4a4d4ddd7..fde0f2744 100644 --- a/qadence/parameters.py +++ b/qadence/parameters.py @@ -9,7 +9,7 @@ from sympy import * from sympy import Array, Basic, Expr, Symbol, sympify from sympytorch import SymPyModule -from torch import Tensor, rand, tensor +from torch import Tensor, heaviside, no_grad, rand, tensor from qadence.logger import get_logger from qadence.types import TNumber @@ -197,7 +197,13 @@ def torchify(expr: Expr) -> SymPyModule: Returns: A torchified, differentiable Expression. 
""" - extra_funcs = {sympy.core.numbers.ImaginaryUnit: 1.0j} + + def heaviside_func(x: Tensor, _: Any) -> Tensor: + with no_grad(): + res = heaviside(x, tensor(0.5)) + return res + + extra_funcs = {sympy.core.numbers.ImaginaryUnit: 1.0j, sympy.Heaviside: heaviside_func} return SymPyModule(expressions=[sympy.N(expr)], extra_funcs=extra_funcs) diff --git a/tests/analog/test_patterns.py b/tests/analog/test_patterns.py new file mode 100644 index 000000000..5f70169ff --- /dev/null +++ b/tests/analog/test_patterns.py @@ -0,0 +1,125 @@ +from __future__ import annotations + +import pytest +import torch +from metrics import ATOL_DICT + +from qadence import ( + AnalogRX, + BackendName, + DifferentiableBackend, + DiffMode, + Parameter, + QuantumCircuit, + QuantumModel, + total_magnetization, +) +from qadence.analog.addressing import AddressingPattern +from qadence.analog.interaction import add_interaction +from qadence.backends.pulser.backend import Backend as PulserBackend +from qadence.backends.pulser.config import Configuration +from qadence.backends.pyqtorch.backend import Backend as PyqBackend + + +@pytest.mark.parametrize( + "max_amp,max_det", + [(0.0, 10.0), (15.0, 0.0), (15.0, 9.0)], +) +@pytest.mark.parametrize( + "spacing", + [8.0, 30.0], +) +def test_pulser_pyq_addressing(max_amp: float, max_det: float, spacing: float) -> None: + n_qubits = 3 + block = AnalogRX("x") + circ = QuantumCircuit(n_qubits, block) + + # define addressing patterns + rand_weights_amp = torch.rand(n_qubits) + rand_weights_amp = rand_weights_amp / rand_weights_amp.sum() + w_amp = {i: rand_weights_amp[i] for i in range(n_qubits)} + rand_weights_det = torch.rand(n_qubits) + rand_weights_det = rand_weights_det / rand_weights_det.sum() + w_det = {i: rand_weights_det[i] for i in range(n_qubits)} + p = AddressingPattern( + n_qubits=n_qubits, + max_det=max_det, + max_amp=max_amp, + weights_det=w_det, + weights_amp=w_amp, + ) + + values = {"x": torch.linspace(0.5, 2 * torch.pi, 50)} + obs = total_magnetization(n_qubits) + conf = Configuration(addressing_pattern=p, spacing=spacing) + + # define pulser backend + pulser_backend = PulserBackend(config=conf) # type: ignore[arg-type] + conv = pulser_backend.convert(circ, obs) + pulser_circ, pulser_obs, embedding_fn, params = conv + diff_backend = DifferentiableBackend(pulser_backend, diff_mode=DiffMode.GPSR) + expval_pulser = diff_backend.expectation(pulser_circ, pulser_obs, embedding_fn(params, values)) + + # define pyq backend + int_circ = add_interaction(circ, spacing=spacing, pattern=p) + pyq_backend = PyqBackend() # type: ignore[arg-type] + conv = pyq_backend.convert(int_circ, obs) + pyq_circ, pyq_obs, embedding_fn, params = conv + diff_backend = DifferentiableBackend(pyq_backend, diff_mode=DiffMode.AD) + expval_pyq = diff_backend.expectation(pyq_circ, pyq_obs, embedding_fn(params, values)) + + torch.allclose(expval_pulser, expval_pyq, atol=ATOL_DICT[BackendName.PULSER]) + + +@pytest.mark.flaky(max_runs=10) +def test_addressing_training() -> None: + n_qubits = 3 + spacing = 8 + f_value = torch.rand(1) + + # define training parameters + w_amp = {i: Parameter(f"w_amp{i}", trainable=True) for i in range(n_qubits)} + w_det = {i: Parameter(f"w_det{i}", trainable=True) for i in range(n_qubits)} + max_amp = Parameter("max_amp", trainable=True) + max_det = Parameter("max_det", trainable=True) + p = AddressingPattern( + n_qubits=n_qubits, + max_det=max_det, + max_amp=max_amp, + weights_det=w_det, # type: ignore [arg-type] + weights_amp=w_amp, # type: ignore [arg-type] + ) + + # define 
training circuit + circ = QuantumCircuit(n_qubits, AnalogRX(1 + torch.rand(1).item())) + circ = add_interaction(circ, spacing=spacing, pattern=p) + + # define quantum model + obs = total_magnetization(n_qubits) + model = QuantumModel(circuit=circ, observable=obs) + + # prepare for training + optimizer = torch.optim.Adam(model.parameters(), lr=0.25) + loss_criterion = torch.nn.MSELoss() + n_epochs = 100 + loss_save = [] + + # train model + for _ in range(n_epochs): + optimizer.zero_grad() + out = model.expectation({}) + loss = loss_criterion(f_value, out) + loss.backward() + optimizer.step() + loss_save.append(loss.item()) + + # get final results + f_value_model = model.expectation({}).detach() + + assert torch.all( + torch.tensor(list(p.evaluate(p.weights_amp, model.vparams).values())) > 0.0 + ) and torch.all(torch.tensor(list(p.evaluate(p.weights_amp, model.vparams).values())) < 1.0) + assert torch.all( + torch.tensor(list(p.evaluate(p.weights_det, model.vparams).values())) > 0.0 + ) and torch.all(torch.tensor(list(p.evaluate(p.weights_det, model.vparams).values())) < 1.0) + assert torch.isclose(f_value, f_value_model, atol=ATOL_DICT[BackendName.PULSER]) From 4d06c64a9e20545eaa28f3296de410ff510fa4d7 Mon Sep 17 00:00:00 2001 From: Vytautas Abramavicius Date: Wed, 15 Nov 2023 12:46:13 +0200 Subject: [PATCH 2/7] fixing tests --- qadence/analog/interaction.py | 5 ++--- qadence/analog/utils.py | 4 ++-- qadence/backends/pulser/devices.py | 3 +-- qadence/backends/pulser/pulses.py | 6 +++--- tests/analog/test_analog_emulation.py | 4 ++-- 5 files changed, 10 insertions(+), 12 deletions(-) diff --git a/qadence/analog/interaction.py b/qadence/analog/interaction.py index a36f2b84b..533d3944b 100644 --- a/qadence/analog/interaction.py +++ b/qadence/analog/interaction.py @@ -226,11 +226,10 @@ def _( from qadence import block_to_tensor w_block = wait(duration=block.duration, qubit_support=block.qubit_support) - i_terms = add_interaction(register, w_block, interaction=interaction) - p_terms = add_pattern(register, pattern) + i_terms = add_interaction(register, w_block, interaction=interaction, pattern=pattern) generator = add(rot_generator(b) for b in block.blocks if isinstance(b, ConstantAnalogRotation)) - generator = generator if i_terms == I(0) else generator + i_terms.generator + p_terms # type: ignore[attr-defined] # noqa: E501 + generator = generator if i_terms == I(0) else generator + i_terms.generator # type: ignore[attr-defined] # noqa: E501 norm = torch.norm(block_to_tensor(generator)).item() return HamEvo(generator / norm, norm * block.duration / 1000) diff --git a/qadence/analog/utils.py b/qadence/analog/utils.py index 1587e2c52..e0e222871 100644 --- a/qadence/analog/utils.py +++ b/qadence/analog/utils.py @@ -11,7 +11,7 @@ ConstantAnalogRotation, ) from qadence.blocks.utils import add, kron -from qadence.operations import N, X, Y +from qadence.operations import I, N, X, Y, Z from qadence.register import Register # Ising coupling coefficient depending on the Rydberg level @@ -148,5 +148,5 @@ def add_pattern(register: Register, pattern: Union[AddressingPattern, None]) -> weights_det = {i: 0.0 for i in support} p_drive_terms = (1 / 2) * max_amp * add(X(i) * weights_amp[i] for i in support) - p_detuning_terms = -max_det * add(N(i) * weights_det[i] for i in support) + p_detuning_terms = -max_det * add(0.5 * (I(i) - Z(i)) * weights_det[i] for i in support) return p_drive_terms + p_detuning_terms # type: ignore[no-any-return] diff --git a/qadence/backends/pulser/devices.py 
b/qadence/backends/pulser/devices.py index 4acbf01d7..522d64ee3 100644 --- a/qadence/backends/pulser/devices.py +++ b/qadence/backends/pulser/devices.py @@ -3,7 +3,6 @@ from numpy import pi from pulser.channels.channels import Rydberg from pulser.channels.eom import RydbergBeam, RydbergEOM -from pulser.devices._device_datacls import Device as PulserDevice from pulser.devices._device_datacls import VirtualDevice from qadence.types import StrEnum @@ -24,7 +23,7 @@ # device with realistic specs with local channels and custom bandwith. -RealisticDevice = PulserDevice( +RealisticDevice = VirtualDevice( name="RealisticDevice", dimensions=2, rydberg_level=60, diff --git a/qadence/backends/pulser/pulses.py b/qadence/backends/pulser/pulses.py index a5ba708cf..fa60b64e4 100644 --- a/qadence/backends/pulser/pulses.py +++ b/qadence/backends/pulser/pulses.py @@ -10,7 +10,7 @@ from pulser.sequence.sequence import Sequence from pulser.waveforms import CompositeWaveform, ConstantWaveform, RampWaveform -from qadence import Register +from qadence import Parameter, Register from qadence.blocks import AbstractBlock, CompositeBlock from qadence.blocks.analog import ( AnalogBlock, @@ -58,8 +58,8 @@ def add_addressing_pattern( else: max_amp = 0.0 max_det = 0.0 - weights_amp = {i: 0.0 for i in support} - weights_det = {i: 0.0 for i in support} + weights_amp = {i: Parameter(0.0) for i in support} + weights_det = {i: Parameter(0.0) for i in support} for i in support: # declare separate local channel for each qubit diff --git a/tests/analog/test_analog_emulation.py b/tests/analog/test_analog_emulation.py index 771e83e86..156b63d44 100644 --- a/tests/analog/test_analog_emulation.py +++ b/tests/analog/test_analog_emulation.py @@ -112,7 +112,7 @@ def test_mixing_digital_analog() -> None: def test_custom_interaction_function() -> None: circuit = QuantumCircuit(2, wait(duration=100)) emulated = add_interaction(circuit, interaction=lambda reg, pairs: I(0)) - assert emulated.block == HamEvo(I(0), 100 / 1000) + assert emulated.block == HamEvo(kron(I(0), I(1)), 100 / 1000) m = QuantumModel(circuit, configuration={"interaction": lambda reg, pairs: I(0)}) - assert m._circuit.abstract.block == HamEvo(I(0), 100 / 1000) + assert m._circuit.abstract.block == HamEvo(kron(I(0), I(1)), 100 / 1000) From a4042802f02cc613f13a49bca7036f52e82b166b Mon Sep 17 00:00:00 2001 From: Vytautas Abramavicius Date: Fri, 17 Nov 2023 22:10:46 +0200 Subject: [PATCH 3/7] added local and global constraints --- qadence/analog/addressing.py | 175 +++++++++++++++++------------- qadence/analog/utils.py | 33 ++++-- qadence/backends/pulser/pulses.py | 20 +++- tests/analog/test_patterns.py | 32 +++--- 4 files changed, 157 insertions(+), 103 deletions(-) diff --git a/qadence/analog/addressing.py b/qadence/analog/addressing.py index ab68a38a4..9c74bb448 100644 --- a/qadence/analog/addressing.py +++ b/qadence/analog/addressing.py @@ -10,8 +10,10 @@ from qadence.parameters import Parameter, evaluate from qadence.types import StrEnum -DEFAULT_MAX_AMPLITUDE = 2 * pi * 3 -DEFAULT_MAX_DETUNING = 2 * pi * 20 +GLOBAL_MAX_AMPLITUDE = 300 +GLOBAL_MAX_DETUNING = 2 * pi * 2000 +LOCAL_MAX_AMPLITUDE = 3 +LOCAL_MAX_DETUNING = 2 * pi * 20 class WeightConstraint(StrEnum): @@ -39,93 +41,116 @@ class AddressingPattern: # list of weights for fixed detuning pattern that cannot be changed during the execution weights_det: dict[int, float | torch.Tensor | Parameter] - # maximum amplitude can also be chosen as a variational parameter if needed - max_amp: float | torch.Tensor | 
Parameter = DEFAULT_MAX_AMPLITUDE
-
-    # maximum detuning can also be chosen as a variational parameter if needed
-    max_det: float | torch.Tensor | Parameter = DEFAULT_MAX_DETUNING
-
-    # weight constraint
-    weight_constraint: WeightConstraint = WeightConstraint.NORMALIZE
-
-    def _normalize_weights(self) -> None:
-        self.weights_amp = {
-            k: Parameter(v) if not isinstance(v, Parameter) else abs(v)
-            for k, v in self.weights_amp.items()
+    # amplitude can also be chosen as a variational parameter if needed
+    amp: float | torch.Tensor | Parameter = LOCAL_MAX_AMPLITUDE
+
+    # detuning can also be chosen as a variational parameter if needed
+    det: float | torch.Tensor | Parameter = LOCAL_MAX_DETUNING
+
+    def _validate_weights(
+        self,
+        weights: dict[int, float | torch.Tensor | Parameter],
+    ) -> None:
+        for v in weights.values():
+            if not isinstance(v, Parameter):
+                if not (v >= 0.0 and v <= 1.0):
+                    raise ValueError("Addressing pattern weights must fall in the range [0.0, 1.0]")
+
+    def _constrain_weights(
+        self,
+        weights: dict[int, float | torch.Tensor | Parameter],
+    ) -> dict:
+        # augment weight dict if needed
+        weights = {
+            i: Parameter(0.0)
+            if i not in weights
+            else (Parameter(weights[i]) if not isinstance(weights[i], Parameter) else weights[i])
+            for i in range(self.n_qubits)
         }
-        sum_weights_amp = sum(list(self.weights_amp.values()))
-        self.weights_amp = {k: v / sum_weights_amp for k, v in self.weights_amp.items()}
 
-        self.weights_det = {
-            k: Parameter(v) if not isinstance(v, Parameter) else abs(v)
-            for k, v in self.weights_det.items()
+        # restrict weights to [0, 1] range
+        weights = {
+            k: abs(v * (sigmoid(v, 20, 1.0) - sigmoid(v, 20.0, -1.0))) for k, v in weights.items()
         }
-        sum_weights_det = sum(list(self.weights_det.values()))
-        self.weights_det = {k: v / sum_weights_det for k, v in self.weights_det.items()}
 
-    def _restrict_weights(self) -> None:
-        self.weights_amp = {
-            k: v * (sigmoid(v, 20, 0.0) - sigmoid(v, 20.0, -1.0))
-            for k, v in self.weights_amp.items()
-        }
-        self.weights_det = {
-            k: v * (sigmoid(v, 20.0, 0.0) - sigmoid(v, 20.0, -1.0))
-            for k, v in self.weights_det.items()
+        return weights
+
+    def _constrain_max_vals(self) -> None:
+        # enforce constraints:
+        # 0 <= amp <= GLOBAL_MAX_AMPLITUDE
+        # 0 <= abs(det) <= GLOBAL_MAX_DETUNING
+        self.amp = abs(
+            self.amp
+            * (
+                sympy.Heaviside(self.amp + GLOBAL_MAX_AMPLITUDE)
+                - sympy.Heaviside(self.amp - GLOBAL_MAX_AMPLITUDE)
+            )
         )
+        self.det = -abs(
+            self.det
+            * (
+                sympy.Heaviside(self.det + GLOBAL_MAX_DETUNING)
+                - sympy.Heaviside(self.det - GLOBAL_MAX_DETUNING)
+            )
+        )
+
+    def _create_local_constraint(self, val: sympy.Expr, weights: dict, max_val: float) -> dict:
+        # enforce local constraints:
+        # amp * w_amp_i < LOCAL_MAX_AMPLITUDE or
+        # abs(det) * w_det_i < LOCAL_MAX_DETUNING
+        local_constr = {k: val * v for k, v in weights.items()}
+        local_constr = {
+            k: sympy.Heaviside(v) - sympy.Heaviside(v - max_val) for k, v in local_constr.items()
         }
 
-    def _restrict_max_vals(self) -> None:
-        self.max_amp = self.max_amp * (
-            sympy.Heaviside(self.max_amp) - sympy.Heaviside(self.max_amp - DEFAULT_MAX_AMPLITUDE)
-        )
-        self.max_det = self.max_det * (
-            sympy.Heaviside(self.max_det) - sympy.Heaviside(self.max_det - DEFAULT_MAX_DETUNING)
+        return local_constr
+
+    def _create_global_constraint(
+        self, val: sympy.Expr, weights: dict, max_val: float
+    ) -> sympy.Expr:
+        # enforce global constraints:
+        # amp * sum(w_amp_0, w_amp_1, ...) < GLOBAL_MAX_AMPLITUDE or
+        # abs(det) * sum(w_det_0, w_det_1, ...) 
< GLOBAL_MAX_DETUNING + weighted_vals_global = val * sum([v for v in weights.values()]) + weighted_vals_global = sympy.Heaviside(weighted_vals_global) - sympy.Heaviside( + weighted_vals_global - max_val ) + return weighted_vals_global + def __post_init__(self) -> None: - # validate weights - if all([not isinstance(v, Parameter) for v in self.weights_amp.values()]): - if not torch.isclose( - torch.tensor(list(self.weights_amp.values())).sum(), - torch.tensor(1.0), - atol=1e-3, - ): - raise ValueError("Amplitude addressing pattern weights must sum to 1.0") - if all([not isinstance(v, Parameter) for v in self.weights_det.values()]): - if not torch.isclose( - torch.tensor(list(self.weights_det.values())).sum(), - torch.tensor(1.0), - atol=1e-3, - ): - raise ValueError("Detuning addressing pattern weights must sum to 1.0") - - # validate detuning value - if not isinstance(self.max_amp, Parameter): - if self.max_amp > DEFAULT_MAX_AMPLITUDE: + # validate amplitude/detuning weights + self._validate_weights(self.weights_amp) + self._validate_weights(self.weights_det) + + # validate maximum global amplitude/detuning values + if not isinstance(self.amp, Parameter): + if self.amp > GLOBAL_MAX_AMPLITUDE: warn("Maximum absolute value of amplitude is exceeded") - if not isinstance(self.max_det, Parameter): - if self.max_det > DEFAULT_MAX_DETUNING: + if not isinstance(self.det, Parameter): + if abs(self.det) > GLOBAL_MAX_DETUNING: warn("Maximum absolute value of detuning is exceeded") - # augment weight dicts if needed - self.weights_amp = { - i: Parameter(0.0) if i not in self.weights_amp else self.weights_amp[i] - for i in range(self.n_qubits) - } - self.weights_det = { - i: Parameter(0.0) if i not in self.weights_det else self.weights_det[i] - for i in range(self.n_qubits) - } + # constrain amplitude/detuning parameterized weights to [0.0, 1.0] interval + self.weights_amp = self._constrain_weights(self.weights_amp) + self.weights_det = self._constrain_weights(self.weights_det) - # apply weight constraint - if self.weight_constraint == WeightConstraint.NORMALIZE: - self._normalize_weights() - elif self.weight_constraint == WeightConstraint.RESTRICT: - self._restrict_weights() - else: - raise ValueError("Weight constraint type not found.") + # constrain max global amplitude and detuning to strict interval + self._constrain_max_vals() - # restrict max amplitude and detuning to strict interval - self._restrict_max_vals() + # create additional local and global constraints for amplitude/detuning masks + self.local_constr_amp = self._create_local_constraint( + self.amp, self.weights_amp, LOCAL_MAX_AMPLITUDE + ) + self.local_constr_det = self._create_local_constraint( + -self.det, self.weights_det, LOCAL_MAX_DETUNING + ) + self.global_constr_amp = self._create_global_constraint( + self.amp, self.weights_amp, GLOBAL_MAX_AMPLITUDE + ) + self.global_constr_det = self._create_global_constraint( + -self.det, self.weights_det, GLOBAL_MAX_DETUNING + ) # validate number of qubits in mask if max(list(self.weights_amp.keys())) >= self.n_qubits: diff --git a/qadence/analog/utils.py b/qadence/analog/utils.py index e0e222871..98e3b4586 100644 --- a/qadence/analog/utils.py +++ b/qadence/analog/utils.py @@ -137,16 +137,33 @@ def rot_generator(block: ConstantAnalogRotation) -> AbstractBlock: def add_pattern(register: Register, pattern: Union[AddressingPattern, None]) -> AbstractBlock: support = tuple(range(register.n_qubits)) if pattern is not None: - max_amp = pattern.max_amp - max_det = pattern.max_det + amp = pattern.amp + 
det = pattern.det weights_amp = pattern.weights_amp weights_det = pattern.weights_det + local_constr_amp = pattern.local_constr_amp + local_constr_det = pattern.local_constr_det + global_constr_amp = pattern.global_constr_amp + global_constr_det = pattern.global_constr_det else: - max_amp = 0.0 - max_det = 0.0 + amp = 0.0 + det = 0.0 weights_amp = {i: 0.0 for i in support} weights_det = {i: 0.0 for i in support} - - p_drive_terms = (1 / 2) * max_amp * add(X(i) * weights_amp[i] for i in support) - p_detuning_terms = -max_det * add(0.5 * (I(i) - Z(i)) * weights_det[i] for i in support) - return p_drive_terms + p_detuning_terms # type: ignore[no-any-return] + local_constr_amp = {i: 0.0 for i in support} + local_constr_det = {i: 0.0 for i in support} + global_constr_amp = 0.0 + global_constr_det = 0.0 + + p_amp_terms = ( + (1 / 2) + * amp + * global_constr_amp + * add(X(i) * weights_amp[i] * local_constr_amp[i] for i in support) + ) + p_det_terms = ( + -det + * global_constr_det + * add(0.5 * (I(i) - Z(i)) * weights_det[i] * local_constr_det[i] for i in support) + ) + return p_amp_terms + p_det_terms # type: ignore[no-any-return] diff --git a/qadence/backends/pulser/pulses.py b/qadence/backends/pulser/pulses.py index fa60b64e4..7687b56d1 100644 --- a/qadence/backends/pulser/pulses.py +++ b/qadence/backends/pulser/pulses.py @@ -51,15 +51,23 @@ def add_addressing_pattern( support = tuple(range(n_qubits)) if config.addressing_pattern is not None: - max_amp = config.addressing_pattern.max_amp - max_det = config.addressing_pattern.max_det + amp = config.addressing_pattern.amp + det = config.addressing_pattern.det weights_amp = config.addressing_pattern.weights_amp weights_det = config.addressing_pattern.weights_det + local_constr_amp = config.addressing_pattern.local_constr_amp + local_constr_det = config.addressing_pattern.local_constr_det + global_constr_amp = config.addressing_pattern.global_constr_amp + global_constr_det = config.addressing_pattern.global_constr_det else: - max_amp = 0.0 - max_det = 0.0 + amp = 0.0 + det = 0.0 weights_amp = {i: Parameter(0.0) for i in support} weights_det = {i: Parameter(0.0) for i in support} + local_constr_amp = {i: Parameter(0.0) for i in support} + local_constr_det = {i: Parameter(0.0) for i in support} + global_constr_amp = 0.0 + global_constr_det = 0.0 for i in support: # declare separate local channel for each qubit @@ -77,8 +85,8 @@ def add_addressing_pattern( if weights_det[i].is_number # type: ignore [union-attr] else sequence.declare_variable(f"w-det-{i}") ) - omega = max_amp * w_amp - detuning = -max_det * w_det + omega = amp * w_amp + detuning = -det * w_det pulse = Pulse.ConstantPulse( duration=total_duration, amplitude=omega, detuning=detuning, phase=0 ) diff --git a/tests/analog/test_patterns.py b/tests/analog/test_patterns.py index 5f70169ff..0788fae11 100644 --- a/tests/analog/test_patterns.py +++ b/tests/analog/test_patterns.py @@ -22,14 +22,14 @@ @pytest.mark.parametrize( - "max_amp,max_det", + "amp,det", [(0.0, 10.0), (15.0, 0.0), (15.0, 9.0)], ) @pytest.mark.parametrize( "spacing", [8.0, 30.0], ) -def test_pulser_pyq_addressing(max_amp: float, max_det: float, spacing: float) -> None: +def test_pulser_pyq_addressing(amp: float, det: float, spacing: float) -> None: n_qubits = 3 block = AnalogRX("x") circ = QuantumCircuit(n_qubits, block) @@ -43,8 +43,8 @@ def test_pulser_pyq_addressing(max_amp: float, max_det: float, spacing: float) - w_det = {i: rand_weights_det[i] for i in range(n_qubits)} p = AddressingPattern( n_qubits=n_qubits, - 
max_det=max_det, - max_amp=max_amp, + det=det, + amp=amp, weights_det=w_det, weights_amp=w_amp, ) @@ -80,12 +80,12 @@ def test_addressing_training() -> None: # define training parameters w_amp = {i: Parameter(f"w_amp{i}", trainable=True) for i in range(n_qubits)} w_det = {i: Parameter(f"w_det{i}", trainable=True) for i in range(n_qubits)} - max_amp = Parameter("max_amp", trainable=True) - max_det = Parameter("max_det", trainable=True) + amp = Parameter("amp", trainable=True) + det = Parameter("det", trainable=True) p = AddressingPattern( n_qubits=n_qubits, - max_det=max_det, - max_amp=max_amp, + det=det, + amp=amp, weights_det=w_det, # type: ignore [arg-type] weights_amp=w_amp, # type: ignore [arg-type] ) @@ -116,10 +116,14 @@ def test_addressing_training() -> None: # get final results f_value_model = model.expectation({}).detach() - assert torch.all( - torch.tensor(list(p.evaluate(p.weights_amp, model.vparams).values())) > 0.0 - ) and torch.all(torch.tensor(list(p.evaluate(p.weights_amp, model.vparams).values())) < 1.0) - assert torch.all( - torch.tensor(list(p.evaluate(p.weights_det, model.vparams).values())) > 0.0 - ) and torch.all(torch.tensor(list(p.evaluate(p.weights_det, model.vparams).values())) < 1.0) + weights_amp = torch.tensor(list(p.evaluate(p.weights_amp, model.vparams).values())) + weights_amp_mask = weights_amp.abs() < 0.001 + weights_amp[weights_amp_mask] = 0.0 + + weights_det = torch.tensor(list(p.evaluate(p.weights_det, model.vparams).values())) + weights_det_mask = weights_det.abs() < 0.001 + weights_det[weights_det_mask] = 0.0 + + assert torch.all(weights_amp >= 0.0) and torch.all(weights_amp <= 1.0) + assert torch.all(weights_det >= 0.0) and torch.all(weights_det <= 1.0) assert torch.isclose(f_value, f_value_model, atol=ATOL_DICT[BackendName.PULSER]) From dcff819803ee0b39d2247517a397f79ccb59cd8c Mon Sep 17 00:00:00 2001 From: Vytautas Abramavicius Date: Mon, 20 Nov 2023 12:41:10 +0200 Subject: [PATCH 4/7] refactored tests --- qadence/analog/interaction.py | 4 ++- tests/analog/test_patterns.py | 51 +++++++++++++++++++---------------- 2 files changed, 31 insertions(+), 24 deletions(-) diff --git a/qadence/analog/interaction.py b/qadence/analog/interaction.py index 533d3944b..80ccb6b2b 100644 --- a/qadence/analog/interaction.py +++ b/qadence/analog/interaction.py @@ -242,4 +242,6 @@ def _( interaction: Callable, pattern: Union[AddressingPattern, None], ) -> AbstractBlock: - return chain(add_interaction(register, b, interaction=interaction) for b in block.blocks) + return chain( + add_interaction(register, b, interaction=interaction, pattern=pattern) for b in block.blocks + ) diff --git a/tests/analog/test_patterns.py b/tests/analog/test_patterns.py index 0788fae11..14ad67aac 100644 --- a/tests/analog/test_patterns.py +++ b/tests/analog/test_patterns.py @@ -6,19 +6,19 @@ from qadence import ( AnalogRX, + AnalogRY, BackendName, - DifferentiableBackend, DiffMode, Parameter, QuantumCircuit, QuantumModel, + Register, + chain, total_magnetization, ) from qadence.analog.addressing import AddressingPattern from qadence.analog.interaction import add_interaction -from qadence.backends.pulser.backend import Backend as PulserBackend from qadence.backends.pulser.config import Configuration -from qadence.backends.pyqtorch.backend import Backend as PyqBackend @pytest.mark.parametrize( @@ -31,8 +31,10 @@ ) def test_pulser_pyq_addressing(amp: float, det: float, spacing: float) -> None: n_qubits = 3 - block = AnalogRX("x") - circ = QuantumCircuit(n_qubits, block) + x = Parameter("x") + 
block = chain(AnalogRX(3 * x), AnalogRY(0.5 * x)) + reg = Register(support=n_qubits, spacing=spacing) + circ = QuantumCircuit(reg, block) # define addressing patterns rand_weights_amp = torch.rand(n_qubits) @@ -51,22 +53,24 @@ def test_pulser_pyq_addressing(amp: float, det: float, spacing: float) -> None: values = {"x": torch.linspace(0.5, 2 * torch.pi, 50)} obs = total_magnetization(n_qubits) - conf = Configuration(addressing_pattern=p, spacing=spacing) + conf = Configuration(addressing_pattern=p) # define pulser backend - pulser_backend = PulserBackend(config=conf) # type: ignore[arg-type] - conv = pulser_backend.convert(circ, obs) - pulser_circ, pulser_obs, embedding_fn, params = conv - diff_backend = DifferentiableBackend(pulser_backend, diff_mode=DiffMode.GPSR) - expval_pulser = diff_backend.expectation(pulser_circ, pulser_obs, embedding_fn(params, values)) + model = QuantumModel( + circuit=circ, + observable=obs, + backend=BackendName.PULSER, + diff_mode=DiffMode.GPSR, + configuration=conf, + ) + expval_pulser = model.expectation(values=values) # define pyq backend - int_circ = add_interaction(circ, spacing=spacing, pattern=p) - pyq_backend = PyqBackend() # type: ignore[arg-type] - conv = pyq_backend.convert(int_circ, obs) - pyq_circ, pyq_obs, embedding_fn, params = conv - diff_backend = DifferentiableBackend(pyq_backend, diff_mode=DiffMode.AD) - expval_pyq = diff_backend.expectation(pyq_circ, pyq_obs, embedding_fn(params, values)) + int_circ = add_interaction(circ, pattern=p) + model = QuantumModel( + circuit=int_circ, observable=obs, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD + ) + expval_pyq = model.expectation(values=values) torch.allclose(expval_pulser, expval_pyq, atol=ATOL_DICT[BackendName.PULSER]) @@ -74,7 +78,7 @@ def test_pulser_pyq_addressing(amp: float, det: float, spacing: float) -> None: @pytest.mark.flaky(max_runs=10) def test_addressing_training() -> None: n_qubits = 3 - spacing = 8 + reg = Register(support=n_qubits, spacing=8) f_value = torch.rand(1) # define training parameters @@ -91,17 +95,18 @@ def test_addressing_training() -> None: ) # define training circuit - circ = QuantumCircuit(n_qubits, AnalogRX(1 + torch.rand(1).item())) - circ = add_interaction(circ, spacing=spacing, pattern=p) + block = chain(AnalogRX(1 + torch.rand(1).item()), AnalogRY(1 + torch.rand(1).item())) + circ = QuantumCircuit(reg, block) + circ = add_interaction(circ, pattern=p) # define quantum model obs = total_magnetization(n_qubits) - model = QuantumModel(circuit=circ, observable=obs) + model = QuantumModel(circuit=circ, observable=obs, backend=BackendName.PYQTORCH) # prepare for training - optimizer = torch.optim.Adam(model.parameters(), lr=0.25) + optimizer = torch.optim.Adam(model.parameters(), lr=0.1) loss_criterion = torch.nn.MSELoss() - n_epochs = 100 + n_epochs = 200 loss_save = [] # train model From bb55399285696ec4f93ac20be677603c11fc8330 Mon Sep 17 00:00:00 2001 From: Vytautas Abramavicius Date: Mon, 20 Nov 2023 12:59:56 +0200 Subject: [PATCH 5/7] raise exception in pulser backend when passed parametrized pattern weights --- qadence/backends/pulser/pulses.py | 32 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 18 deletions(-) diff --git a/qadence/backends/pulser/pulses.py b/qadence/backends/pulser/pulses.py index 7687b56d1..7b628eba3 100644 --- a/qadence/backends/pulser/pulses.py +++ b/qadence/backends/pulser/pulses.py @@ -55,19 +55,11 @@ def add_addressing_pattern( det = config.addressing_pattern.det weights_amp = 
config.addressing_pattern.weights_amp
         weights_det = config.addressing_pattern.weights_det
-        local_constr_amp = config.addressing_pattern.local_constr_amp
-        local_constr_det = config.addressing_pattern.local_constr_det
-        global_constr_amp = config.addressing_pattern.global_constr_amp
-        global_constr_det = config.addressing_pattern.global_constr_det
     else:
         amp = 0.0
         det = 0.0
         weights_amp = {i: Parameter(0.0) for i in support}
         weights_det = {i: Parameter(0.0) for i in support}
-        local_constr_amp = {i: Parameter(0.0) for i in support}
-        local_constr_det = {i: Parameter(0.0) for i in support}
-        global_constr_amp = 0.0
-        global_constr_det = 0.0
 
     for i in support:
         # declare separate local channel for each qubit
@@ -75,16 +67,20 @@ def add_addressing_pattern(
 
     # add amplitude and detuning patterns
     for i in support:
-        w_amp = (
-            evaluate(weights_amp[i])
-            if weights_amp[i].is_number  # type: ignore [union-attr]
-            else sequence.declare_variable(f"w-amp-{i}")
-        )
-        w_det = (
-            evaluate(weights_det[i])
-            if weights_det[i].is_number  # type: ignore [union-attr]
-            else sequence.declare_variable(f"w-det-{i}")
-        )
+        if weights_amp[i].is_number:  # type: ignore [union-attr]
+            w_amp = evaluate(weights_amp[i])
+        else:
+            raise ValueError(
+                "Pulser backend currently doesn't support parametrized amplitude pattern weights."
+            )
+
+        if weights_det[i].is_number:  # type: ignore [union-attr]
+            w_det = evaluate(weights_det[i])
+        else:
+            raise ValueError(
+                "Pulser backend currently doesn't support parametrized detuning pattern weights."
+            )
+
         omega = amp * w_amp
         detuning = -det * w_det
         pulse = Pulse.ConstantPulse(

From 6f2cea8cd0def4f31e16b080bb0dba2914753a2c Mon Sep 17 00:00:00 2001
From: Vytautas Abramavicius
Date: Tue, 28 Nov 2023 09:36:39 +0200
Subject: [PATCH 6/7] added addressing pattern doc

---
 .../semi-local-addressing.md | 172 ++++++++++++++++++
 1 file changed, 172 insertions(+)
 create mode 100644 docs/digital_analog_qc/semi-local-addressing.md

diff --git a/docs/digital_analog_qc/semi-local-addressing.md b/docs/digital_analog_qc/semi-local-addressing.md
new file mode 100644
index 000000000..c569d98fb
--- /dev/null
+++ b/docs/digital_analog_qc/semi-local-addressing.md
@@ -0,0 +1,172 @@
+
+
+## Physics behind semi-local addressing patterns
+
+Recall that in Qadence the general neutral-atom Hamiltonian for a set of $n$ interacting qubits is given by the expression
+
+$$
+\mathcal{H} = \mathcal{H}_{\rm drive} + \mathcal{H}_{\rm int} = \sum_{i=0}^{n-1}\left(\mathcal{H}^\text{d}_{i}(t) + \sum_{j<i}\mathcal{H}^\text{int}_{ij}\right)
+$$

From: Vytautas Abramavicius
Date: Tue, 28 Nov 2023 13:42:50 +0200
Subject: [PATCH 7/7] fixed tests; added trainable weights definition with
 strings

---
 .../semi-local-addressing.md | 14 ++---
 qadence/analog/addressing.py | 52 ++++++++-----------
 qadence/analog/utils.py | 8 +--
 qadence/backends/pulser/pulses.py | 16 ++++--
 tests/analog/test_patterns.py | 18 ++++---
 5 files changed, 57 insertions(+), 51 deletions(-)

diff --git a/docs/digital_analog_qc/semi-local-addressing.md b/docs/digital_analog_qc/semi-local-addressing.md
index c569d98fb..647baeda7 100644
--- a/docs/digital_analog_qc/semi-local-addressing.md
+++ b/docs/digital_analog_qc/semi-local-addressing.md
@@ -116,18 +116,18 @@ expval_pulser = model.expectation(values=values)
 
 ### Trainable weights
 
-Since the user can specify both the maximal detuning/amplitude value of the addressing pattern and the corresponding weights, it is natural to make these parameters variational in order to use them in some QML setting. This can be achieved by defining pattern weights as trainable `Parameter` instances. 
+Since the user can specify both the maximal detuning/amplitude value of the addressing pattern and the corresponding weights, it is natural to make these parameters variational in order to use them in some QML setting. This can be achieved by defining pattern weights as trainable `Parameter` instances or strings specifying weight names. ```python exec="on" source="material-block" session="emu" n_qubits = 3 reg = Register(support=n_qubits, spacing=8) f_value = torch.rand(1) -# define training parameters -w_amp = {i: Parameter(f"w_amp{i}", trainable=True) for i in range(n_qubits)} -w_det = {i: Parameter(f"w_det{i}", trainable=True) for i in range(n_qubits)} -amp = Parameter("amp", trainable=True) -det = Parameter("det", trainable=True) +# define training parameters as strings +w_amp = {i: f"w_amp{i}" for i in range(n_qubits)} +w_det = {i: f"w_det{i}" for i in range(n_qubits)} +amp = "amp" +det = "det" p = AddressingPattern( n_qubits=n_qubits, det=det, @@ -137,7 +137,7 @@ p = AddressingPattern( ) # define training circuit -block = chain(AnalogRX(1 + torch.rand(1).item()), AnalogRY(1 + torch.rand(1).item())) +block = AnalogRX(1 + torch.rand(1).item()) circ = QuantumCircuit(reg, block) circ = add_interaction(circ, pattern=p) diff --git a/qadence/analog/addressing.py b/qadence/analog/addressing.py index 9c74bb448..91faa5c75 100644 --- a/qadence/analog/addressing.py +++ b/qadence/analog/addressing.py @@ -8,7 +8,6 @@ from numpy import pi from qadence.parameters import Parameter, evaluate -from qadence.types import StrEnum GLOBAL_MAX_AMPLITUDE = 300 GLOBAL_MAX_DETUNING = 2 * pi * 2000 @@ -16,49 +15,39 @@ LOCAL_MAX_DETUNING = 2 * pi * 20 -class WeightConstraint(StrEnum): - """Supported types of constraints for addressing weights.""" - - NORMALIZE = "normalize" - """Normalize weights so that they sum up to 1.""" - - RESTRICT = "restrict" - """Restrict weight values to interval [0, 1].""" - - def sigmoid(x: torch.Tensor, a: float, b: float) -> sympy.Expr: return 1.0 / (1.0 + sympy.exp(-a * (x + b))) @dataclass class AddressingPattern: - # number of qubits n_qubits: int + """Number of qubits in register.""" - # list of weights for fixed amplitude pattern that cannot be changed during the execution - weights_amp: dict[int, float | torch.Tensor | Parameter] + weights_amp: dict[int, str | float | torch.Tensor | Parameter] + """List of weights for fixed amplitude pattern that cannot be changed during the execution.""" - # list of weights for fixed detuning pattern that cannot be changed during the execution - weights_det: dict[int, float | torch.Tensor | Parameter] + weights_det: dict[int, str | float | torch.Tensor | Parameter] + """List of weights for fixed detuning pattern that cannot be changed during the execution.""" - # amplitude can also be chosen as a variational parameter if needed - amp: float | torch.Tensor | Parameter = LOCAL_MAX_AMPLITUDE + amp: str | float | torch.Tensor | Parameter = LOCAL_MAX_AMPLITUDE + """Maximal amplitude of the amplitude pattern felt by a single qubit.""" - # detuning can also be chosen as a variational parameter if needed - det: float | torch.Tensor | Parameter = LOCAL_MAX_DETUNING + det: str | float | torch.Tensor | Parameter = LOCAL_MAX_DETUNING + """Maximal detuning of the detuning pattern felt by a single qubit.""" def _validate_weights( self, - weights: dict[int, float | torch.Tensor | Parameter], + weights: dict[int, str | float | torch.Tensor | Parameter], ) -> None: for v in weights.values(): - if not isinstance(v, Parameter): + if not isinstance(v, (str, 
Parameter)):
                 if not (v >= 0.0 and v <= 1.0):
                     raise ValueError("Addressing pattern weights must fall in the range [0.0, 1.0]")
 
     def _constrain_weights(
         self,
-        weights: dict[int, float | torch.Tensor | Parameter],
+        weights: dict[int, str | float | torch.Tensor | Parameter],
     ) -> dict:
         # augment weight dict if needed
         weights = {
@@ -68,9 +57,10 @@ def _constrain_weights(
             for i in range(self.n_qubits)
         }
 
-        # restrict weights to [0, 1] range
+        # restrict weights to [0, 1] range - equal to 0 everywhere else
         weights = {
-            k: abs(v * (sigmoid(v, 20, 1) - sigmoid(v, 20.0, -1))) for k, v in weights.items()
+            k: v if v.is_number else abs(v * (sigmoid(v, 20, 1) - sigmoid(v, 20.0, -1)))  # type: ignore [union-attr]
+            for k, v in weights.items()
         }
 
         return weights
@@ -82,8 +72,8 @@ def _constrain_max_vals(self) -> None:
         self.amp = abs(
             self.amp
             * (
-                sympy.Heaviside(self.amp + GLOBAL_MAX_AMPLITUDE)
-                - sympy.Heaviside(self.amp - GLOBAL_MAX_AMPLITUDE)
+                sympy.Heaviside(self.amp + GLOBAL_MAX_AMPLITUDE)  # type: ignore [operator]
+                - sympy.Heaviside(self.amp - GLOBAL_MAX_AMPLITUDE)  # type: ignore [operator]
             )
         )
         self.det = -abs(
@@ -124,12 +114,16 @@ def __post_init__(self) -> None:
         self._validate_weights(self.weights_det)
 
         # validate maximum global amplitude/detuning values
-        if not isinstance(self.amp, Parameter):
+        if not isinstance(self.amp, (str, Parameter)):
             if self.amp > GLOBAL_MAX_AMPLITUDE:
                 warn("Maximum absolute value of amplitude is exceeded")
-        if not isinstance(self.det, Parameter):
+        elif isinstance(self.amp, str):
+            self.amp = Parameter(self.amp, trainable=True)
+        if not isinstance(self.det, (str, Parameter)):
             if abs(self.det) > GLOBAL_MAX_DETUNING:
                 warn("Maximum absolute value of detuning is exceeded")
+        elif isinstance(self.det, str):
+            self.det = Parameter(self.det, trainable=True)
 
         # constrain amplitude/detuning parameterized weights to [0.0, 1.0] interval
         self.weights_amp = self._constrain_weights(self.weights_amp)
diff --git a/qadence/analog/utils.py b/qadence/analog/utils.py
index 98e3b4586..d464f3cdf 100644
--- a/qadence/analog/utils.py
+++ b/qadence/analog/utils.py
@@ -156,14 +156,14 @@ def add_pattern(register: Register, pattern: Union[AddressingPattern, None]) ->
 
     p_amp_terms = (
-        (1 / 2)
+        (1 / 2)  # type: ignore [operator]
         * amp
         * global_constr_amp
-        * add(X(i) * weights_amp[i] * local_constr_amp[i] for i in support)
+        * add(X(i) * weights_amp[i] * local_constr_amp[i] for i in support)  # type: ignore [operator]
     )
     p_det_terms = (
-        -det
+        -det  # type: ignore [operator]
         * global_constr_det
-        * add(0.5 * (I(i) - Z(i)) * weights_det[i] * local_constr_det[i] for i in support)
+        * add(0.5 * (I(i) - Z(i)) * weights_det[i] * local_constr_det[i] for i in support)  # type: ignore [operator]
     )
     return p_amp_terms + p_det_terms  # type: ignore[no-any-return]
diff --git a/qadence/backends/pulser/pulses.py b/qadence/backends/pulser/pulses.py
index 7b628eba3..02ed96734 100644
--- a/qadence/backends/pulser/pulses.py
+++ b/qadence/backends/pulser/pulses.py
@@ -55,11 +55,19 @@ def add_addressing_pattern(
         det = config.addressing_pattern.det
         weights_amp = config.addressing_pattern.weights_amp
         weights_det = config.addressing_pattern.weights_det
+        local_constr_amp = config.addressing_pattern.local_constr_amp
+        local_constr_det = config.addressing_pattern.local_constr_det
+        global_constr_amp = config.addressing_pattern.global_constr_amp
+        global_constr_det = config.addressing_pattern.global_constr_det
     else:
         amp = 0.0
         det = 0.0
         weights_amp = {i: Parameter(0.0) for i in support}
weights_det = {i: Parameter(0.0) for i in support} + local_constr_amp = {i: 0.0 for i in support} + local_constr_det = {i: 0.0 for i in support} + global_constr_amp = 0.0 + global_constr_det = 0.0 for i in support: # declare separate local channel for each qubit @@ -68,21 +76,21 @@ def add_addressing_pattern( # add amplitude and detuning patterns for i in support: if weights_amp[i].is_number: # type: ignore [union-attr] - w_amp = evaluate(weights_amp[i]) + w_amp = evaluate(weights_amp[i], as_torch=True) * local_constr_amp[i] else: raise ValueError( "Pulser backend currently doesn't support parametrized amplitude pattern weights." ) if weights_det[i].is_number: # type: ignore [union-attr] - w_det = evaluate(weights_det[i]) + w_det = evaluate(weights_det[i], as_torch=True) * local_constr_det[i] else: raise ValueError( "Pulser backend currently doesn't support parametrized detuning pattern weights." ) - omega = amp * w_amp - detuning = -det * w_det + omega = global_constr_amp * amp * w_amp + detuning = global_constr_det * det * w_det pulse = Pulse.ConstantPulse( duration=total_duration, amplitude=omega, detuning=detuning, phase=0 ) diff --git a/tests/analog/test_patterns.py b/tests/analog/test_patterns.py index 14ad67aac..b0d18349b 100644 --- a/tests/analog/test_patterns.py +++ b/tests/analog/test_patterns.py @@ -2,7 +2,7 @@ import pytest import torch -from metrics import ATOL_DICT +from metrics import LOW_ACCEPTANCE, MIDDLE_ACCEPTANCE from qadence import ( AnalogRX, @@ -19,6 +19,7 @@ from qadence.analog.addressing import AddressingPattern from qadence.analog.interaction import add_interaction from qadence.backends.pulser.config import Configuration +from qadence.states import equivalent_state @pytest.mark.parametrize( @@ -63,6 +64,7 @@ def test_pulser_pyq_addressing(amp: float, det: float, spacing: float) -> None: diff_mode=DiffMode.GPSR, configuration=conf, ) + wf_pulser = model.run(values=values) expval_pulser = model.expectation(values=values) # define pyq backend @@ -70,9 +72,11 @@ def test_pulser_pyq_addressing(amp: float, det: float, spacing: float) -> None: model = QuantumModel( circuit=int_circ, observable=obs, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD ) + wf_pyq = model.run(values=values) expval_pyq = model.expectation(values=values) - torch.allclose(expval_pulser, expval_pyq, atol=ATOL_DICT[BackendName.PULSER]) + assert equivalent_state(wf_pulser, wf_pyq, atol=MIDDLE_ACCEPTANCE) + assert torch.allclose(expval_pulser, expval_pyq, atol=MIDDLE_ACCEPTANCE) @pytest.mark.flaky(max_runs=10) @@ -82,10 +86,10 @@ def test_addressing_training() -> None: f_value = torch.rand(1) # define training parameters - w_amp = {i: Parameter(f"w_amp{i}", trainable=True) for i in range(n_qubits)} - w_det = {i: Parameter(f"w_det{i}", trainable=True) for i in range(n_qubits)} - amp = Parameter("amp", trainable=True) - det = Parameter("det", trainable=True) + w_amp = {i: f"w_amp{i}" for i in range(n_qubits)} + w_det = {i: f"w_det{i}" for i in range(n_qubits)} + amp = "amp" + det = "det" p = AddressingPattern( n_qubits=n_qubits, det=det, @@ -131,4 +135,4 @@ def test_addressing_training() -> None: assert torch.all(weights_amp >= 0.0) and torch.all(weights_amp <= 1.0) assert torch.all(weights_det >= 0.0) and torch.all(weights_det <= 1.0) - assert torch.isclose(f_value, f_value_model, atol=ATOL_DICT[BackendName.PULSER]) + assert torch.isclose(f_value, f_value_model, atol=LOW_ACCEPTANCE)
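
For quick reference, below is a minimal usage sketch of the `AddressingPattern` API as it stands after patch 7, adapted from `tests/analog/test_patterns.py` in this series. It assumes the qadence version these patches target (import paths and defaults may differ in later releases); the register spacing, weight values, and `amp`/`det` values are illustrative only.

```python
import torch

from qadence import (
    AnalogRX,
    BackendName,
    DiffMode,
    QuantumCircuit,
    QuantumModel,
    Register,
    total_magnetization,
)
from qadence.analog.addressing import AddressingPattern
from qadence.analog.interaction import add_interaction

n_qubits = 3

# register with a fixed spacing, as in the tests above
reg = Register(support=n_qubits, spacing=8.0)
circ = QuantumCircuit(reg, AnalogRX("x"))

# fixed (non-parametrized) pattern: per-qubit weights must lie in [0, 1];
# amp/det set the maximal amplitude/detuning felt by a single qubit
pattern = AddressingPattern(
    n_qubits=n_qubits,
    amp=15.0,
    det=9.0,
    weights_amp={i: 0.25 for i in range(n_qubits)},
    weights_det={i: 0.25 for i in range(n_qubits)},
)

# emulate the pattern by folding it into the effective interaction
# Hamiltonian, then evaluate on the pyqtorch backend
int_circ = add_interaction(circ, pattern=pattern)
model = QuantumModel(
    circuit=int_circ,
    observable=total_magnetization(n_qubits),
    backend=BackendName.PYQTORCH,
    diff_mode=DiffMode.AD,
)
expval = model.expectation(values={"x": torch.tensor([1.0])})
```

For execution on the Pulser backend, the same pattern is instead passed through the backend configuration, as in the tests above: `Configuration(addressing_pattern=pattern)` handed to `QuantumModel` via `configuration=...`. Note that patch 5 makes the Pulser backend raise an error for parametrized pattern weights, so only fixed weights run there.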