Commit 73825cf
Co-authored-by: Kaonan Micadei <[email protected]>
Co-authored-by: Vytautas Abramavicius <[email protected]>
1 parent 1f1c65d. Showing 8 changed files with 729 additions and 0 deletions.
@@ -0,0 +1,5 @@
from __future__ import annotations

from .backend import Backend, Configuration
from .devices import Device
from .pulses import supported_gates
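For orientation: this init module re-exports the backend's public entry points. A minimal import sketch, assuming the package ends up exposed as qadence.backends.pulser (the diff only shows relative imports, so the exact path is an assumption):

# Hypothetical import path; not confirmed by this diff.
from qadence.backends.pulser import Backend, Configuration, Device, supported_gates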
@@ -0,0 +1,242 @@
from __future__ import annotations

from collections import Counter
from dataclasses import dataclass
from typing import Any

import numpy as np
import qutip
import torch
from pulser import Register as PulserRegister
from pulser import Sequence
from pulser.pulse import Pulse
from pulser_simulation.simresults import SimulationResults
from pulser_simulation.simulation import QutipEmulator
from torch import Tensor

from qadence.backend import Backend as BackendInterface
from qadence.backend import BackendName, ConvertedCircuit, ConvertedObservable
from qadence.backends.utils import to_list_of_dicts
from qadence.blocks import AbstractBlock
from qadence.circuit import QuantumCircuit
from qadence.measurements import Measurements
from qadence.overlap import overlap_exact
from qadence.register import Register
from qadence.utils import Endianness

from .channels import GLOBAL_CHANNEL, LOCAL_CHANNEL
from .config import Configuration
from .convert_ops import convert_observable
from .devices import Device, IdealDevice, RealisticDevice
from .pulses import add_pulses

WEAK_COUPLING_CONST = 1.2

DEFAULT_SPACING = 8.0  # µm (standard value)


def create_register(register: Register, spacing: float = DEFAULT_SPACING) -> PulserRegister:
    """Create a Pulser register instance.

    Args:
        register (Register): graph representing a register with accompanying coordinate data
        spacing (float): distance between qubits in micrometers

    Returns:
        PulserRegister: the Pulser register
    """
    # create register from coordinates
    coords = np.array(list(register.coords.values()))
    return PulserRegister.from_coordinates(coords * spacing)


def make_sequence(circ: QuantumCircuit, config: Configuration) -> Sequence:
    if config.device_type == Device.IDEALIZED:
        device = IdealDevice
    elif config.device_type == Device.REALISTIC:
        device = RealisticDevice
    else:
        raise ValueError("Specified device is not supported.")

    max_amp = device.channels["rydberg_global"].max_amp
    min_duration = device.channels["rydberg_global"].min_duration

    if config.spacing is not None:
        spacing = config.spacing
    elif max_amp is not None:
        # Ideal spacing for entanglement gate
        spacing = WEAK_COUPLING_CONST * device.rydberg_blockade_radius(max_amp)  # type: ignore
    else:
        spacing = DEFAULT_SPACING

    pulser_register = create_register(circ.register, spacing)

    sequence = Sequence(pulser_register, device)
    sequence.declare_channel(GLOBAL_CHANNEL, "rydberg_global")
    sequence.declare_channel(LOCAL_CHANNEL, "rydberg_local", initial_target=0)

    # Add a minimum-duration omega=0 pulse at the beginning for simulation convergence reasons,
    # since Pulser's QutipEmulator doesn't allow simulation of sequences with total duration < 4 ns.
    zero_pulse = Pulse.ConstantPulse(
        duration=max(sequence.device.channels["rydberg_global"].min_duration, 4),
        amplitude=0.0,
        detuning=0.0,
        phase=0.0,
    )
    sequence.add(zero_pulse, GLOBAL_CHANNEL, "wait-for-all")

    add_pulses(sequence, circ.block, config, circ.register, spacing)
    sequence.measure()

    return sequence


# TODO: make it parallelized
# TODO: add execution on the cloud platform
def simulate_sequence(
    sequence: Sequence, config: Configuration, state: Tensor
) -> SimulationResults:
    simulation = QutipEmulator.from_sequence(
        sequence,
        sampling_rate=config.sampling_rate,
        config=config.sim_config,
        with_modulation=config.with_modulation,
    )
    if state is not None:
        simulation.set_initial_state(qutip.Qobj(state.cpu().numpy()))

    return simulation.run(nsteps=config.n_steps_solv, method=config.method_solv)


@dataclass(frozen=True, eq=True)
class Backend(BackendInterface):
    """The Pulser backend."""

    name: BackendName = BackendName.PULSER
    supports_ad: bool = False
    support_bp: bool = False
    is_remote: bool = False
    with_measurements: bool = True
    with_noise: bool = False
    native_endianness: Endianness = Endianness.BIG
    config: Configuration = Configuration()

    def circuit(self, circ: QuantumCircuit) -> ConvertedCircuit:
        native = make_sequence(circ, self.config)

        return ConvertedCircuit(native=native, abstract=circ, original=circ)

    def observable(self, observable: AbstractBlock, n_qubits: int | None = None) -> ConvertedObservable:
        from qadence.transpile import flatten, scale_primitive_blocks_only, transpile

        # make sure only leaves, i.e. primitive blocks, are scaled
        block = transpile(flatten, scale_primitive_blocks_only)(observable)

        (native,) = convert_observable(block, n_qubits=n_qubits, config=self.config)
        return ConvertedObservable(native=native, abstract=block, original=observable)

    def assign_parameters(
        self,
        circuit: ConvertedCircuit,
        param_values: dict[str, Tensor],
    ) -> Any:
        if param_values == {} and circuit.native.is_parametrized():
            missing = list(circuit.native.declared_variables.keys())
            raise ValueError(f"Please, provide values for the following parameters: {missing}")

        if param_values == {}:
            return circuit.native

        numpy_param_values = {
            k: v.detach().cpu().numpy()
            for (k, v) in param_values.items()
            if k in circuit.native.declared_variables
        }

        return circuit.native.build(**numpy_param_values)

    def run(
        self,
        circuit: ConvertedCircuit,
        param_values: dict[str, Tensor] = {},
        state: Tensor | None = None,
        endianness: Endianness = Endianness.BIG,
    ) -> Tensor:
        vals = to_list_of_dicts(param_values)

        batched_wf = np.zeros((len(vals), 2**circuit.abstract.n_qubits), dtype=np.complex128)

        for i, param_values_el in enumerate(vals):
            sequence = self.assign_parameters(circuit, param_values_el)
            sim_result = simulate_sequence(sequence, self.config, state)
            wf = (
                sim_result.get_final_state(ignore_global_phase=False, normalize=True)
                .full()
                .flatten()
            )

            # We flip the wavefunction coming out of Pulser,
            # essentially swapping logic 0 and logic 1 in the basis states.
            batched_wf[i] = np.flip(wf)

        batched_wf_torch = torch.from_numpy(batched_wf)

        if endianness != self.native_endianness:
            from qadence.transpile import invert_endianness

            batched_wf_torch = invert_endianness(batched_wf_torch)

        return batched_wf_torch

    def sample(
        self,
        circuit: ConvertedCircuit,
        param_values: dict[str, Tensor] = {},
        n_shots: int = 1,
        state: Tensor | None = None,
        endianness: Endianness = Endianness.BIG,
    ) -> list[Counter]:
        if n_shots < 1:
            raise ValueError("You can only call sample with n_shots>0.")

        vals = to_list_of_dicts(param_values)

        samples = []
        for param_values_el in vals:
            sequence = self.assign_parameters(circuit, param_values_el)
            sim_result = simulate_sequence(sequence, self.config, state)
            sample = sim_result.sample_final_state(n_shots)
            samples.append(sample)
        if endianness != self.native_endianness:
            from qadence.transpile import invert_endianness

            samples = invert_endianness(samples)
        return samples

    def expectation(
        self,
        circuit: ConvertedCircuit,
        observable: list[ConvertedObservable] | ConvertedObservable,
        param_values: dict[str, Tensor] = {},
        state: Tensor | None = None,
        protocol: Measurements | None = None,
        endianness: Endianness = Endianness.BIG,
    ) -> Tensor:
        state = self.run(circuit, param_values=param_values, state=state, endianness=endianness)

        observables = observable if isinstance(observable, list) else [observable]
        support = sorted(list(circuit.abstract.register.support))
        res_list = [obs.native(state, param_values, qubit_support=support) for obs in observables]

        res = torch.transpose(torch.stack(res_list), 0, 1).squeeze()
        res = res if len(res.shape) > 0 else res.reshape(1)
        return res.real

    @staticmethod
    def _overlap(bras: Tensor, kets: Tensor) -> Tensor:
        return overlap_exact(bras, kets)

    @staticmethod
    def default_configuration() -> Configuration:
        return Configuration()
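Taken together, the backend follows the standard qadence flow: convert the circuit to a native Pulser Sequence, bind parameter values, emulate with QutipEmulator, and post-process. A minimal usage sketch; `circ` (a QuantumCircuit) and `obs` (an AbstractBlock observable) are assumed to be built elsewhere and are not part of this commit:

# Sketch only: `circ` and `obs` are hypothetical inputs defined elsewhere.
backend = Backend(config=Configuration(device_type=Device.REALISTIC, spacing=8.0))

conv_circ = backend.circuit(circ)                  # wraps the native Pulser Sequence
conv_obs = backend.observable(obs, circ.n_qubits)  # wraps a PulserObservable module

wf = backend.run(conv_circ)                        # batched wavefunction, shape (batch, 2**n_qubits)
counts = backend.sample(conv_circ, n_shots=100)    # list of Counter objects
exp = backend.expectation(conv_circ, conv_obs)     # real-valued expectation values
# For parametrized sequences, pass param_values={name: tensor} to run/sample/expectation.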
@@ -0,0 +1,16 @@
from __future__ import annotations

from dataclasses import dataclass

from pulser.channels.channels import Rydberg

GLOBAL_CHANNEL = "Global"
LOCAL_CHANNEL = "Local"


@dataclass(frozen=True)
class CustomRydberg(Rydberg):
    name: str = "Rydberg"

    duration_steps: int = 1  # ns
    amplitude_steps: float = 0.01  # rad/µs
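The custom channel only layers step-granularity metadata (duration and amplitude steps) on top of Pulser's Rydberg channel. A hypothetical sketch of building a global channel from it, assuming Pulser's Channel.Global classmethod forwards extra keyword arguments to the dataclass constructor (behaviour not shown in this commit):

import numpy as np

# Hypothetical limits; real values would come from the targeted device specification.
global_channel = CustomRydberg.Global(
    max_abs_detuning=2 * np.pi * 20,  # rad/µs
    max_amp=2 * np.pi * 2.5,          # rad/µs
    duration_steps=2,                 # ns
    amplitude_steps=0.05,             # rad/µs
)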
@@ -0,0 +1,54 @@
from __future__ import annotations

from dataclasses import dataclass
from typing import Optional

from pulser_simulation.simconfig import SimConfig

from qadence.backend import BackendConfiguration
from qadence.blocks.analog import Interaction

from .devices import Device


@dataclass
class Configuration(BackendConfiguration):
    # device type
    device_type: Device = Device.IDEALIZED

    # atomic spacing
    spacing: Optional[float] = None

    # sampling rate to be used for local simulations
    sampling_rate: float = 1.0

    # solver method to pass to the Qutip solver
    method_solv: str = "adams"

    # number of solver steps to pass to the Qutip solver
    n_steps_solv: float = 1e8

    # simulation configuration with optional noise options
    sim_config: Optional[SimConfig] = None

    # add modulation to the local execution
    with_modulation: bool = False

    # use gate-level parameters
    use_gate_params = True

    # pulse amplitude on local channel
    amplitude_local: Optional[float] = None

    # pulse amplitude on global channel
    amplitude_global: Optional[float] = None

    # detuning value
    detuning: Optional[float] = None

    # interaction type
    interaction: Interaction = Interaction.NN

    def __post_init__(self) -> None:
        if self.sim_config is not None and not isinstance(self.sim_config, SimConfig):
            raise TypeError("Wrong 'sim_config' attribute type, pass a valid SimConfig object!")
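Because __post_init__ only accepts a genuine SimConfig, noisy emulation is configured by handing over a Pulser SimConfig instance rather than raw options. A small sketch, assuming SPAM noise (one of the noise types understood by pulser_simulation's SimConfig):

from pulser_simulation.simconfig import SimConfig

# Realistic device, explicit 8 µm spacing, coarser sampling, and SPAM noise in the emulation.
config = Configuration(
    device_type=Device.REALISTIC,
    spacing=8.0,
    sampling_rate=0.5,
    sim_config=SimConfig(noise=("SPAM",)),
)

Passing anything else (for example a plain dict of noise options) raises the TypeError above.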
@@ -0,0 +1,42 @@
from __future__ import annotations

from typing import Sequence

import torch
from torch.nn import Module

from qadence.blocks import (
    AbstractBlock,
)
from qadence.blocks.block_to_tensor import (
    block_to_tensor,
)
from qadence.utils import Endianness

from .config import Configuration


def convert_observable(
    block: AbstractBlock, n_qubits: int | None, config: Configuration | None = None
) -> Sequence[Module]:
    return [PulserObservable(block, n_qubits)]


class PulserObservable(Module):
    def __init__(self, block: AbstractBlock, n_qubits: int | None):
        super().__init__()
        self.block = block
        self.n_qubits = n_qubits

    def forward(
        self,
        state: torch.Tensor,
        values: dict[str, torch.Tensor] | list = {},
        qubit_support: tuple | None = None,
        endianness: Endianness = Endianness.BIG,
    ) -> torch.Tensor:
        # FIXME: cache this, it is very inefficient for non-parametric observables
        block_mat = block_to_tensor(
            self.block, values, qubit_support=qubit_support, endianness=endianness  # type: ignore [arg-type]  # noqa
        ).squeeze(0)
        return torch.sum(torch.matmul(state, block_mat) * state.conj(), dim=1)
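The forward pass computes the expectation value ⟨ψ|O|ψ⟩ for every state in the batch: matmul applies the observable matrix, the elementwise product with the conjugated state and the sum over dim=1 contract the remaining index, and for a Hermitian observable the result is real. A standalone numerical check with a hypothetical one-qubit Pauli-Z observable (not part of the commit), comparing the expression above with the textbook einsum form:

import torch

z = torch.tensor([[1.0, 0.0], [0.0, -1.0]], dtype=torch.cdouble)  # Pauli-Z matrix
state = torch.tensor([[0.6, 0.8j]], dtype=torch.cdouble)          # batch of one normalized state

# Expression used in PulserObservable.forward above
exp_forward = torch.sum(torch.matmul(state, z) * state.conj(), dim=1)

# Textbook <psi|O|psi> via einsum; both evaluate to 0.36 - 0.64 = -0.28
exp_einsum = torch.einsum("bi,ij,bj->b", state.conj(), z, state)

assert torch.allclose(exp_forward.real, exp_einsum.real)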