diff --git a/latest b/latest index 0d687f1e2..1de48e4cd 120000 --- a/latest +++ b/latest @@ -1 +1 @@ -v1.7.3 \ No newline at end of file +v1.7.4 \ No newline at end of file diff --git a/v1.7.4/404.html b/v1.7.4/404.html new file mode 100644 index 000000000..028ee84ce --- /dev/null +++ b/v1.7.4/404.html @@ -0,0 +1,2376 @@ + + + + + + + + + + + + + + + + + + + Qadence + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ +

404 - Not found

+ +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v1.7.4/api/backends/backend/index.html b/v1.7.4/api/backends/backend/index.html new file mode 100644 index 000000000..0f0afdeb3 --- /dev/null +++ b/v1.7.4/api/backends/backend/index.html @@ -0,0 +1,4223 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Abstract backend - Qadence + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Abstract backend

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ Backend(name, supports_ad, support_bp, supports_adjoint, is_remote, with_measurements, native_endianness, engine, with_noise, config) + + + dataclass + + +

+ + +
+

+ Bases: ABC

+ + +

The abstract class that defines the interface for the backends.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ATTRIBUTEDESCRIPTION
name +
+

backend unique string identifier

+
+

+ + TYPE: + BackendName + +

+
supports_ad +
+

whether or not the backend has a native autograd

+
+

+ + TYPE: + bool + +

+
supports_bp +
+

whether or not the backend has a native backprop

+
+

+ + TYPE: + bool + +

+
supports_adjoint +
+

Does the backend support native adjoint differentation.

+
+

+ + TYPE: + bool + +

+
is_remote +
+

whether computations are executed locally or remotely on this +backend, useful when using cloud platforms where credentials are +needed for example.

+
+

+ + TYPE: + bool + +

+
with_measurements +
+

whether it supports counts or not

+
+

+ + TYPE: + bool + +

+
with_noise +
+

whether to add realistic noise or not

+
+

+ + TYPE: + bool + +

+
native_endianness +
+

The native endianness of the backend

+
+

+ + TYPE: + Endianness + +

+
engine +
+

The underlying (native) automatic differentiation engine of the backend.

+
+

+ + TYPE: + Engine + +

+
+ + + + +
+ + + + + + + + + +
+ + +

+ circuit(circuit) + + + abstractmethod + + +

+ + +
+ +

Converts an abstract QuantumCircuit to the native backend representation.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
circuit +
+

A circuit, for example: QuantumCircuit(2, X(0))

+
+

+ + TYPE: + QuantumCircuit + +

+
+ + + + + + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + ConvertedCircuit + + +
+

A converted circuit c. You can access the original, arbstract circuit via c.abstract

+
+
+ + ConvertedCircuit + + +
+

and the converted (or backend native) circuit via c.native.

+
+
+ +
+ Source code in qadence/backend.py +
@abstractmethod
+def circuit(self, circuit: QuantumCircuit) -> ConvertedCircuit:
+    """Converts an abstract `QuantumCircuit` to the native backend representation.
+
+    Arguments:
+        circuit: A circuit, for example: `QuantumCircuit(2, X(0))`
+
+    Returns:
+        A converted circuit `c`. You can access the original, arbstract circuit via `c.abstract`
+        and the converted (or backend *native*) circuit via `c.native`.
+    """
+    raise NotImplementedError
+
+
+
+ +
+ +
+ + +

+ convert(circuit, observable=None) + +

+ + +
+ +

Convert an abstract circuit and an optional observable to their native representation.

+

Additionally, this function constructs an embedding function which maps from +user-facing parameters to device parameters (read more on parameter embedding +here).

+ +
+ Source code in qadence/backend.py +
def convert(
+    self, circuit: QuantumCircuit, observable: list[AbstractBlock] | AbstractBlock | None = None
+) -> Converted:
+    """Convert an abstract circuit and an optional observable to their native representation.
+
+    Additionally, this function constructs an embedding function which maps from
+    user-facing parameters to device parameters (read more on parameter embedding
+    [here][qadence.blocks.embedding.embedding]).
+    """
+
+    def check_observable(obs_obj: Any) -> AbstractBlock:
+        if isinstance(obs_obj, QubitOperator):
+            from qadence.blocks.manipulate import from_openfermion
+
+            assert len(obs_obj.terms) > 0, "Make sure to give a non-empty qubit hamiltonian"
+
+            return from_openfermion(obs_obj)
+
+        elif isinstance(obs_obj, (CompositeBlock, PrimitiveBlock, ScaleBlock)):
+            from qadence.blocks.utils import block_is_qubit_hamiltonian
+
+            assert block_is_qubit_hamiltonian(
+                obs_obj
+            ), "Make sure the QubitHamiltonian consists only of Pauli operators X, Y, Z, I"
+            return obs_obj
+        raise TypeError(
+            "qubit_hamiltonian should be a Pauli-like AbstractBlock or a QubitOperator"
+        )
+
+    conv_circ = self.circuit(circuit)
+    circ_params, circ_embedding_fn = embedding(
+        conv_circ.abstract.block, self.config._use_gate_params, self.engine
+    )
+    params = circ_params
+    if observable is not None:
+        observable = observable if isinstance(observable, list) else [observable]
+        conv_obs = []
+        obs_embedding_fn_list = []
+
+        for obs in observable:
+            obs = check_observable(obs)
+            c_obs = self.observable(obs, max(circuit.n_qubits, obs.n_qubits))
+            obs_params, obs_embedding_fn = embedding(
+                c_obs.abstract, self.config._use_gate_params, self.engine
+            )
+            params.update(obs_params)
+            obs_embedding_fn_list.append(obs_embedding_fn)
+            conv_obs.append(c_obs)
+
+        def embedding_fn_dict(a: dict, b: dict) -> dict:
+            embedding_dict = circ_embedding_fn(a, b)
+            for o in obs_embedding_fn_list:
+                embedding_dict.update(o(a, b))
+            return embedding_dict
+
+        return Converted(conv_circ, conv_obs, embedding_fn_dict, params)
+
+    def embedding_fn(a: dict, b: dict) -> dict:
+        return circ_embedding_fn(a, b)
+
+    return Converted(conv_circ, None, embedding_fn, params)
+
+
+
+ +
+ +
+ + +

+ expectation(circuit, observable, param_values={}, state=None, measurement=None, noise=None, mitigation=None, endianness=Endianness.BIG) + + + abstractmethod + + +

+ + +
+ +

Compute the expectation value of the circuit with the given observable.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
circuit +
+

A converted circuit as returned by backend.circuit.

+
+

+ + TYPE: + ConvertedCircuit + +

+
param_values +
+

Already embedded parameters of the circuit. See +embedding for more info.

+
+

+ + TYPE: + ParamDictType + + + DEFAULT: + {} + +

+
state +
+

Initial state.

+
+

+ + TYPE: + ArrayLike | None + + + DEFAULT: + None + +

+
measurement +
+

Optional measurement protocol. If None, use +exact expectation value with a statevector simulator.

+
+

+ + TYPE: + Measurements | None + + + DEFAULT: + None + +

+
noise +
+

A noise model to use.

+
+

+ + TYPE: + Noise | None + + + DEFAULT: + None + +

+
endianness +
+

Endianness of the resulting bit strings.

+
+

+ + TYPE: + Endianness + + + DEFAULT: + BIG + +

+
+ +
+ Source code in qadence/backend.py +
@abstractmethod
+def expectation(
+    self,
+    circuit: ConvertedCircuit,
+    observable: list[ConvertedObservable] | ConvertedObservable,
+    param_values: ParamDictType = {},
+    state: ArrayLike | None = None,
+    measurement: Measurements | None = None,
+    noise: Noise | None = None,
+    mitigation: Mitigations | None = None,
+    endianness: Endianness = Endianness.BIG,
+) -> ArrayLike:
+    """Compute the expectation value of the `circuit` with the given `observable`.
+
+    Arguments:
+        circuit: A converted circuit as returned by `backend.circuit`.
+        param_values: _**Already embedded**_ parameters of the circuit. See
+            [`embedding`][qadence.blocks.embedding.embedding] for more info.
+        state: Initial state.
+        measurement: Optional measurement protocol. If None, use
+            exact expectation value with a statevector simulator.
+        noise: A noise model to use.
+        endianness: Endianness of the resulting bit strings.
+    """
+    raise NotImplementedError
+
+
+
+ +
+ +
+ + +

+ observable(observable, n_qubits) + + + abstractmethod + + +

+ + +
+ +

Converts an abstract observable (which is just an AbstractBlock) to the native backend.

+

representation.

+ + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
observable +
+

An observable.

+
+

+ + TYPE: + AbstractBlock + +

+
n_qubits +
+

Number of qubits the observable covers. This is typically circuit.n_qubits.

+
+

+ + TYPE: + int + +

+
+ + + + + + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + ConvertedObservable + + +
+

A converted observable o. You can access the original, arbstract observable via

+
+
+ + ConvertedObservable + + +
+

o.abstract and the converted (or backend native) observable via o.native.

+
+
+ +
+ Source code in qadence/backend.py +
@abstractmethod
+def observable(self, observable: AbstractBlock, n_qubits: int) -> ConvertedObservable:
+    """Converts an abstract observable (which is just an `AbstractBlock`) to the native backend.
+
+    representation.
+
+    Arguments:
+        observable: An observable.
+        n_qubits: Number of qubits the observable covers. This is typically `circuit.n_qubits`.
+
+    Returns:
+        A converted observable `o`. You can access the original, arbstract observable via
+        `o.abstract` and the converted (or backend *native*) observable via `o.native`.
+    """
+    raise NotImplementedError
+
+
+
+ +
+ +
+ + +

+ run(circuit, param_values={}, state=None, endianness=Endianness.BIG, *args, **kwargs) + +

+ + +
+ +

Run a circuit and return the resulting wave function.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
circuit +
+

A converted circuit as returned by backend.circuit.

+
+

+ + TYPE: + ConvertedCircuit + +

+
param_values +
+

Already embedded parameters of the circuit. See +embedding for more info.

+
+

+ + TYPE: + dict[str, ArrayLike] + + + DEFAULT: + {} + +

+
state +
+

Initial state.

+
+

+ + TYPE: + Tensor | None + + + DEFAULT: + None + +

+
endianness +
+

Endianness of the resulting wavefunction.

+
+

+ + TYPE: + Endianness + + + DEFAULT: + BIG + +

+
+ + + + + + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + ArrayLike + + +
+

A list of Counter objects where each key represents a bitstring

+
+
+ + ArrayLike + + +
+

and its value the number of times it has been sampled from the given wave function.

+
+
+ +
+ Source code in qadence/backend.py +
def run(
+    self,
+    circuit: ConvertedCircuit,
+    param_values: dict[str, ArrayLike] = {},
+    state: Tensor | None = None,
+    endianness: Endianness = Endianness.BIG,
+    *args: Any,
+    **kwargs: Any,
+) -> ArrayLike:
+    """Run a circuit and return the resulting wave function.
+
+    Arguments:
+        circuit: A converted circuit as returned by `backend.circuit`.
+        param_values: _**Already embedded**_ parameters of the circuit. See
+            [`embedding`][qadence.blocks.embedding.embedding] for more info.
+        state: Initial state.
+        endianness: Endianness of the resulting wavefunction.
+
+    Returns:
+        A list of Counter objects where each key represents a bitstring
+        and its value the number of times it has been sampled from the given wave function.
+    """
+    raise NotImplementedError
+
+
+
+ +
+ +
+ + +

+ run_dm(circuit, noise, param_values={}, state=None, endianness=Endianness.BIG) + + + abstractmethod + + +

+ + +
+ +

Run a circuit and return the resulting the density matrix.

+

TODO: Temporary method for the purposes of noise model implementation. +To be removed in a later refactoring.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
circuit +
+

A converted circuit as returned by backend.circuit.

+
+

+ + TYPE: + ConvertedCircuit + +

+
param_values +
+

Already embedded parameters of the circuit. See +embedding for more info.

+
+

+ + TYPE: + dict[str, ArrayLike] + + + DEFAULT: + {} + +

+
state +
+

Initial state.

+
+

+ + TYPE: + Tensor | None + + + DEFAULT: + None + +

+
endianness +
+

Endianness of the resulting density matrix.

+
+

+ + TYPE: + Endianness + + + DEFAULT: + BIG + +

+
+ + + + + + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + Tensor + + +
+

A list of Counter objects where each key represents a bitstring

+
+
+ + Tensor + + +
+

and its value the number of times it has been sampled from the given wave function.

+
+
+ +
+ Source code in qadence/backend.py +
@abstractmethod
+def run_dm(
+    self,
+    circuit: ConvertedCircuit,
+    noise: Noise,
+    param_values: dict[str, ArrayLike] = {},
+    state: Tensor | None = None,
+    endianness: Endianness = Endianness.BIG,
+) -> Tensor:
+    """Run a circuit and return the resulting the density matrix.
+
+    TODO: Temporary method for the purposes of noise model implementation.
+    To be removed in a later refactoring.
+
+    Arguments:
+        circuit: A converted circuit as returned by `backend.circuit`.
+        param_values: _**Already embedded**_ parameters of the circuit. See
+            [`embedding`][qadence.blocks.embedding.embedding] for more info.
+        state: Initial state.
+        endianness: Endianness of the resulting density matrix.
+
+    Returns:
+        A list of Counter objects where each key represents a bitstring
+        and its value the number of times it has been sampled from the given wave function.
+    """
+    raise NotImplementedError
+
+
+
+ +
+ +
+ + +

+ sample(circuit, param_values={}, n_shots=1000, state=None, noise=None, mitigation=None, endianness=Endianness.BIG) + + + abstractmethod + + +

+ + +
+ +

Sample bit strings.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
circuit +
+

A converted circuit as returned by backend.circuit.

+
+

+ + TYPE: + ConvertedCircuit + +

+
param_values +
+

Already embedded parameters of the circuit. See +embedding for more info.

+
+

+ + TYPE: + dict[str, Tensor] + + + DEFAULT: + {} + +

+
n_shots +
+

Number of shots to sample.

+
+

+ + TYPE: + int + + + DEFAULT: + 1000 + +

+
state +
+

Initial state.

+
+

+ + TYPE: + ArrayLike | None + + + DEFAULT: + None + +

+
noise +
+

A noise model to use.

+
+

+ + TYPE: + Noise | None + + + DEFAULT: + None + +

+
mitigation +
+

An error mitigation protocol to apply.

+
+

+ + TYPE: + Mitigations | None + + + DEFAULT: + None + +

+
endianness +
+

Endianness of the resulting bit strings.

+
+

+ + TYPE: + Endianness + + + DEFAULT: + BIG + +

+
+ +
+ Source code in qadence/backend.py +
@abstractmethod
+def sample(
+    self,
+    circuit: ConvertedCircuit,
+    param_values: dict[str, Tensor] = {},
+    n_shots: int = 1000,
+    state: ArrayLike | None = None,
+    noise: Noise | None = None,
+    mitigation: Mitigations | None = None,
+    endianness: Endianness = Endianness.BIG,
+) -> list[Counter]:
+    """Sample bit strings.
+
+    Arguments:
+        circuit: A converted circuit as returned by `backend.circuit`.
+        param_values: _**Already embedded**_ parameters of the circuit. See
+            [`embedding`][qadence.blocks.embedding.embedding] for more info.
+        n_shots: Number of shots to sample.
+        state: Initial state.
+        noise: A noise model to use.
+        mitigation: An error mitigation protocol to apply.
+        endianness: Endianness of the resulting bit strings.
+    """
+    raise NotImplementedError
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ BackendConfiguration(_use_gate_params=True, use_sparse_observable=False, use_gradient_checkpointing=False, use_single_qubit_composition=False, transpilation_passes=None) + + + dataclass + + +

+ + +
+ + + + + +
+ + + + + + + + + +
+ + +

+ available_options() + +

+ + +
+ +

Return as a string the available fields with types of the configuration.

+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ str + +
+

a string with all the available fields, one per line

+
+

+ + TYPE: + str + +

+
+ +
+ Source code in qadence/backend.py +
48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
def available_options(self) -> str:
+    """Return as a string the available fields with types of the configuration.
+
+    Returns:
+        str: a string with all the available fields, one per line
+    """
+    conf_msg = ""
+    for _field in fields(self):
+        if not _field.name.startswith("_"):
+            conf_msg += (
+                f"Name: {_field.name} - Type: {_field.type} - Default value: {_field.default}\n"
+            )
+    return conf_msg
+
+
+
+ +
+ +
+ + +

+ get_param_name(blk) + +

+ + +
+ +

Return parameter names for the current backend.

+

Depending on which backend is in use this +function returns either UUIDs or expressions of parameters.

+ +
+ Source code in qadence/backend.py +
75
+76
+77
+78
+79
+80
+81
+82
+83
+84
+85
+86
+87
+88
+89
+90
+91
def get_param_name(self, blk: AbstractBlock) -> Tuple[str, ...]:
+    """Return parameter names for the current backend.
+
+    Depending on which backend is in use this
+    function returns either UUIDs or expressions of parameters.
+    """
+    param_ids: Tuple
+    # FIXME: better type hiearchy?
+    types = (TimeEvolutionBlock, ParametricBlock, ConstantAnalogRotation, InteractionBlock)
+    if not isinstance(blk, types):
+        raise TypeError(f"Can not infer param name from {type(blk)}")
+    else:
+        if self._use_gate_params:
+            param_ids = tuple(blk.parameters.uuids())
+        else:
+            param_ids = tuple(map(stringify, blk.parameters.expressions()))
+    return param_ids
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v1.7.4/api/backends/differentiable/index.html b/v1.7.4/api/backends/differentiable/index.html new file mode 100644 index 000000000..19b29e6ea --- /dev/null +++ b/v1.7.4/api/backends/differentiable/index.html @@ -0,0 +1,3371 @@ + + + + + + + + + + + + + + + + + + + + + + + DifferentiableBackend - Qadence + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

DifferentiableBackend

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ DifferentiableBackend(backend, diff_mode=DiffMode.AD, **psr_args) + +

+ + +
+

+ Bases: DifferentiableBackend

+ + +

A class which wraps a QuantumBackend with the automatic differentation engine TORCH.

+ + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
backend +
+

An instance of the QuantumBackend type perform execution.

+
+

+ + TYPE: + Backend + +

+
diff_mode +
+

A differentiable mode supported by the differentiation engine.

+
+

+ + TYPE: + DiffMode + + + DEFAULT: + AD + +

+
**psr_args +
+

Arguments that will be passed on to DifferentiableExpectation.

+
+

+ + TYPE: + int | float | None + + + DEFAULT: + {} + +

+
+ +
+ Source code in qadence/engines/torch/differentiable_backend.py +
27
+28
+29
+30
+31
+32
+33
+34
def __init__(
+    self,
+    backend: QuantumBackend,
+    diff_mode: DiffMode = DiffMode.AD,
+    **psr_args: int | float | None,
+) -> None:
+    super().__init__(backend=backend, engine=Engine.TORCH, diff_mode=diff_mode)
+    self.psr_args = psr_args
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ expectation(circuit, observable, param_values={}, state=None, measurement=None, noise=None, mitigation=None, endianness=Endianness.BIG) + +

+ + +
+ +

Compute the expectation value of the circuit with the given observable.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
circuit +
+

A converted circuit as returned by backend.circuit.

+
+

+ + TYPE: + ConvertedCircuit + +

+
observable +
+

A converted observable as returned by backend.observable.

+
+

+ + TYPE: + list[ConvertedObservable] | ConvertedObservable + +

+
param_values +
+

Already embedded parameters of the circuit. See +embedding for more info.

+
+

+ + TYPE: + ParamDictType + + + DEFAULT: + {} + +

+
state +
+

Initial state.

+
+

+ + TYPE: + ArrayLike | None + + + DEFAULT: + None + +

+
measurement +
+

Optional measurement protocol. If None, use +exact expectation value with a statevector simulator.

+
+

+ + TYPE: + Measurements | None + + + DEFAULT: + None + +

+
noise +
+

A noise model to use.

+
+

+ + TYPE: + Noise | None + + + DEFAULT: + None + +

+
mitigation +
+

The error mitigation to use.

+
+

+ + TYPE: + Mitigations | None + + + DEFAULT: + None + +

+
endianness +
+

Endianness of the resulting bit strings.

+
+

+ + TYPE: + Endianness + + + DEFAULT: + BIG + +

+
+ +
+ Source code in qadence/engines/torch/differentiable_backend.py +
36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
+85
def expectation(
+    self,
+    circuit: ConvertedCircuit,
+    observable: list[ConvertedObservable] | ConvertedObservable,
+    param_values: ParamDictType = {},
+    state: ArrayLike | None = None,
+    measurement: Measurements | None = None,
+    noise: Noise | None = None,
+    mitigation: Mitigations | None = None,
+    endianness: Endianness = Endianness.BIG,
+) -> ArrayLike:
+    """Compute the expectation value of the `circuit` with the given `observable`.
+
+    Arguments:
+        circuit: A converted circuit as returned by `backend.circuit`.
+        observable: A converted observable as returned by `backend.observable`.
+        param_values: _**Already embedded**_ parameters of the circuit. See
+            [`embedding`][qadence.blocks.embedding.embedding] for more info.
+        state: Initial state.
+        measurement: Optional measurement protocol. If None, use
+            exact expectation value with a statevector simulator.
+        noise: A noise model to use.
+        mitigation: The error mitigation to use.
+        endianness: Endianness of the resulting bit strings.
+    """
+    observable = observable if isinstance(observable, list) else [observable]
+    differentiable_expectation = DifferentiableExpectation(
+        backend=self.backend,
+        circuit=circuit,
+        observable=observable,
+        param_values=param_values,
+        state=state,
+        measurement=measurement,
+        noise=noise,
+        mitigation=mitigation,
+        endianness=endianness,
+    )
+
+    if self.diff_mode == DiffMode.AD:
+        expectation = differentiable_expectation.ad
+    elif self.diff_mode == DiffMode.ADJOINT:
+        expectation = differentiable_expectation.adjoint
+    else:
+        try:
+            fns = get_gpsr_fns()
+            psr_fn = fns[self.diff_mode]
+        except KeyError:
+            raise ValueError(f"{self.diff_mode} differentiation mode is not supported")
+        expectation = partial(differentiable_expectation.psr, psr_fn=psr_fn, **self.psr_args)
+    return expectation()
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ DifferentiableBackend(backend, diff_mode=DiffMode.AD, **psr_args) + +

+ + +
+

+ Bases: DifferentiableBackend

+ + +

A class which wraps a QuantumBackend with the automatic differentation engine JAX.

+ + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
backend +
+

An instance of the QuantumBackend type perform execution.

+
+

+ + TYPE: + Backend + +

+
diff_mode +
+

A differentiable mode supported by the differentiation engine.

+
+

+ + TYPE: + DiffMode + + + DEFAULT: + AD + +

+
**psr_args +
+

Arguments that will be passed on to DifferentiableExpectation.

+
+

+ + TYPE: + int | float | None + + + DEFAULT: + {} + +

+
+ +
+ Source code in qadence/engines/jax/differentiable_backend.py +
23
+24
+25
+26
+27
+28
+29
+30
def __init__(
+    self,
+    backend: Backend,
+    diff_mode: DiffMode = DiffMode.AD,
+    **psr_args: int | float | None,
+) -> None:
+    super().__init__(backend=backend, engine=Engine.JAX, diff_mode=diff_mode)
+    self.psr_args = psr_args
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ expectation(circuit, observable, param_values={}, state=None, measurement=None, noise=None, mitigation=None, endianness=Endianness.BIG) + +

+ + +
+ +

Compute the expectation value of the circuit with the given observable.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
circuit +
+

A converted circuit as returned by backend.circuit.

+
+

+ + TYPE: + ConvertedCircuit + +

+
observable +
+

A converted observable as returned by backend.observable.

+
+

+ + TYPE: + list[ConvertedObservable] | ConvertedObservable + +

+
param_values +
+

Already embedded parameters of the circuit. See +embedding for more info.

+
+

+ + TYPE: + ParamDictType + + + DEFAULT: + {} + +

+
state +
+

Initial state.

+
+

+ + TYPE: + ArrayLike | None + + + DEFAULT: + None + +

+
measurement +
+

Optional measurement protocol. If None, use +exact expectation value with a statevector simulator.

+
+

+ + TYPE: + Measurements | None + + + DEFAULT: + None + +

+
noise +
+

A noise model to use.

+
+

+ + TYPE: + Noise | None + + + DEFAULT: + None + +

+
mitigation +
+

The error mitigation to use.

+
+

+ + TYPE: + Mitigations | None + + + DEFAULT: + None + +

+
endianness +
+

Endianness of the resulting bit strings.

+
+

+ + TYPE: + Endianness + + + DEFAULT: + BIG + +

+
+ +
+ Source code in qadence/engines/jax/differentiable_backend.py +
32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
def expectation(
+    self,
+    circuit: ConvertedCircuit,
+    observable: list[ConvertedObservable] | ConvertedObservable,
+    param_values: ParamDictType = {},
+    state: ArrayLike | None = None,
+    measurement: Measurements | None = None,
+    noise: Noise | None = None,
+    mitigation: Mitigations | None = None,
+    endianness: Endianness = Endianness.BIG,
+) -> ArrayLike:
+    """Compute the expectation value of the `circuit` with the given `observable`.
+
+    Arguments:
+        circuit: A converted circuit as returned by `backend.circuit`.
+        observable: A converted observable as returned by `backend.observable`.
+        param_values: _**Already embedded**_ parameters of the circuit. See
+            [`embedding`][qadence.blocks.embedding.embedding] for more info.
+        state: Initial state.
+        measurement: Optional measurement protocol. If None, use
+            exact expectation value with a statevector simulator.
+        noise: A noise model to use.
+        mitigation: The error mitigation to use.
+        endianness: Endianness of the resulting bit strings.
+    """
+    observable = observable if isinstance(observable, list) else [observable]
+
+    if self.diff_mode == DiffMode.AD:
+        expectation = self.backend.expectation(circuit, observable, param_values, state)
+    else:
+        expectation = DifferentiableExpectation(
+            backend=self.backend,
+            circuit=circuit,
+            observable=observable,
+            param_values=param_values,
+            state=state,
+            measurement=measurement,
+            noise=noise,
+            mitigation=mitigation,
+            endianness=endianness,
+        ).psr()
+    return expectation
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v1.7.4/api/backends/pulser/index.html b/v1.7.4/api/backends/pulser/index.html new file mode 100644 index 000000000..d89bb1179 --- /dev/null +++ b/v1.7.4/api/backends/pulser/index.html @@ -0,0 +1,2686 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Pulser - Qadence + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Pulser

+ +

The Pulser backend features a basic integration with the pulse-level programming +interface Pulser. This backend offers for now few simple operations +which are translated into a valid, non time-dependent pulse sequence. In particular, one has access to:

+
    +
  • analog rotations: AnalogRx and AnalogRy blocks
  • +
  • free evolution blocks (basically no pulse, just interaction): AnalogWait block
  • +
  • a block for creating entangled states: AnalogEntanglement
  • +
  • digital rotation Rx and Ry
  • +
+ + +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ Backend(name=BackendName.PULSER, supports_ad=False, support_bp=False, supports_adjoint=False, is_remote=False, with_measurements=True, native_endianness=Endianness.BIG, engine=Engine.TORCH, with_noise=False, config=Configuration()) + + + dataclass + + +

+ + +
+

+ Bases: Backend

+ + +

The Pulser backend.

+ + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + +
+ + +

+ create_register(register) + +

+ + +
+ +

Convert Qadence Register to Pulser Register.

+ +
+ Source code in qadence/backends/pulser/backend.py +
51
+52
+53
+54
def create_register(register: Register) -> PulserRegister:
+    """Convert Qadence Register to Pulser Register."""
+    coords = np.array(list(register.coords.values()))
+    return PulserRegister.from_coordinates(coords)
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v1.7.4/api/backends/pyqtorch/index.html b/v1.7.4/api/backends/pyqtorch/index.html new file mode 100644 index 000000000..f48fc0c74 --- /dev/null +++ b/v1.7.4/api/backends/pyqtorch/index.html @@ -0,0 +1,3564 @@ + + + + + + + + + + + + + + + + + + + + + + + + + PyQTorch - Qadence + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

PyQTorch

+ +

Fast differentiable statevector emulator based on PyTorch. The code is open source, +hosted on Github and maintained by Pasqal.

+ + +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ Backend(name=BackendName.PYQTORCH, supports_ad=True, support_bp=True, supports_adjoint=True, is_remote=False, with_measurements=True, native_endianness=Endianness.BIG, engine=Engine.TORCH, with_noise=False, config=Configuration()) + + + dataclass + + +

+ + +
+

+ Bases: Backend

+ + +

PyQTorch backend.

+ + + + +
+ + + + + + + + + +
+ + +

+ convert(circuit, observable=None) + +

+ + +
+ +

Convert an abstract circuit and an optional observable to their native representation.

+

Additionally, this function constructs an embedding function which maps from +user-facing parameters to device parameters (read more on parameter embedding +here).

+ +
+ Source code in qadence/backend.py +
def convert(
+    self, circuit: QuantumCircuit, observable: list[AbstractBlock] | AbstractBlock | None = None
+) -> Converted:
+    """Convert an abstract circuit and an optional observable to their native representation.
+
+    Additionally, this function constructs an embedding function which maps from
+    user-facing parameters to device parameters (read more on parameter embedding
+    [here][qadence.blocks.embedding.embedding]).
+    """
+
+    def check_observable(obs_obj: Any) -> AbstractBlock:
+        if isinstance(obs_obj, QubitOperator):
+            from qadence.blocks.manipulate import from_openfermion
+
+            assert len(obs_obj.terms) > 0, "Make sure to give a non-empty qubit hamiltonian"
+
+            return from_openfermion(obs_obj)
+
+        elif isinstance(obs_obj, (CompositeBlock, PrimitiveBlock, ScaleBlock)):
+            from qadence.blocks.utils import block_is_qubit_hamiltonian
+
+            assert block_is_qubit_hamiltonian(
+                obs_obj
+            ), "Make sure the QubitHamiltonian consists only of Pauli operators X, Y, Z, I"
+            return obs_obj
+        raise TypeError(
+            "qubit_hamiltonian should be a Pauli-like AbstractBlock or a QubitOperator"
+        )
+
+    conv_circ = self.circuit(circuit)
+    circ_params, circ_embedding_fn = embedding(
+        conv_circ.abstract.block, self.config._use_gate_params, self.engine
+    )
+    params = circ_params
+    if observable is not None:
+        observable = observable if isinstance(observable, list) else [observable]
+        conv_obs = []
+        obs_embedding_fn_list = []
+
+        for obs in observable:
+            obs = check_observable(obs)
+            c_obs = self.observable(obs, max(circuit.n_qubits, obs.n_qubits))
+            obs_params, obs_embedding_fn = embedding(
+                c_obs.abstract, self.config._use_gate_params, self.engine
+            )
+            params.update(obs_params)
+            obs_embedding_fn_list.append(obs_embedding_fn)
+            conv_obs.append(c_obs)
+
+        def embedding_fn_dict(a: dict, b: dict) -> dict:
+            embedding_dict = circ_embedding_fn(a, b)
+            for o in obs_embedding_fn_list:
+                embedding_dict.update(o(a, b))
+            return embedding_dict
+
+        return Converted(conv_circ, conv_obs, embedding_fn_dict, params)
+
+    def embedding_fn(a: dict, b: dict) -> dict:
+        return circ_embedding_fn(a, b)
+
+    return Converted(conv_circ, None, embedding_fn, params)
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ Configuration(_use_gate_params=True, use_sparse_observable=False, use_gradient_checkpointing=False, use_single_qubit_composition=False, transpilation_passes=None, algo_hevo=AlgoHEvo.EXP, ode_solver=SolverType.DP5_SE, n_steps_hevo=100, loop_expectation=False) + + + dataclass + + +

+ + +
+

+ Bases: BackendConfiguration

+ + + + + +
+ + + + + + + +
+ + + +

+ algo_hevo: AlgoHEvo = AlgoHEvo.EXP + + + class-attribute + instance-attribute + + +

+ + +
+ +

Determine which kind of Hamiltonian evolution algorithm to use.

+
+ +
+ +
+ + + +

+ loop_expectation: bool = False + + + class-attribute + instance-attribute + + +

+ + +
+ +

When computing batches of expectation values, only allocate one wavefunction.

+

Loop over the batch of parameters to only allocate a single wavefunction at any given time.

+
+ +
+ +
+ + + +

+ n_steps_hevo: int = 100 + + + class-attribute + instance-attribute + + +

+ + +
+ +

Default number of steps for the Hamiltonian evolution.

+
+ +
+ +
+ + + +

+ ode_solver: SolverType = SolverType.DP5_SE + + + class-attribute + instance-attribute + + +

+ + +
+ +

Determine which ODE solver to use for time-dependent blocks.

+
+ +
+ +
+ + + +

+ use_gradient_checkpointing: bool = False + + + class-attribute + instance-attribute + + +

+ + +
+ +

Use gradient checkpointing.

+

Recommended for higher-order optimization tasks.

+
+ +
+ +
+ + + +

+ use_single_qubit_composition: bool = False + + + class-attribute + instance-attribute + + +

+ + +
+ +

Composes chains of single qubit gates into a single matmul if possible.

+
+ +
+ + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ +
+ + + + +
+ + + +
+ + + + + + + +
+ + + +

+ supported_gates = list(set(OpName.list()) - set([OpName.TDAGGER])) + + + module-attribute + + +

+ + +
+ +

The set of supported gates.

+

Tdagger is currently not supported.

+
+ +
+ + +
+ + + +

+ PyQHamiltonianEvolution(qubit_support, n_qubits, block, config) + +

+ + +
+

+ Bases: Module

+ + +
+ Source code in qadence/backends/pyqtorch/convert_ops.py +
def __init__(
+    self,
+    qubit_support: Tuple[int, ...],
+    n_qubits: int,
+    block: TimeEvolutionBlock,
+    config: Configuration,
+):
+    super().__init__()
+    self.qubit_support = qubit_support
+    self.n_qubits = n_qubits
+    self.param_names = config.get_param_name(block)
+    self.block = block
+    self.hmat: Tensor
+    self.config = config
+
+    if isinstance(block.generator, AbstractBlock) and not block.generator.is_parametric:
+        hmat = block_to_tensor(
+            block.generator,
+            qubit_support=self.qubit_support,
+            use_full_support=False,
+        )
+        hmat = hmat.permute(1, 2, 0)
+        self.register_buffer("hmat", hmat)
+        self._hamiltonian = lambda self, values: self.hmat
+
+    elif isinstance(block.generator, Tensor):
+        m = block.generator.to(dtype=cdouble)
+        hmat = block_to_tensor(
+            MatrixBlock(
+                m,
+                qubit_support=block.qubit_support,
+                check_unitary=False,
+                check_hermitian=True,
+            ),
+            qubit_support=self.qubit_support,
+            use_full_support=False,
+        )
+        hmat = hmat.permute(1, 2, 0)
+        self.register_buffer("hmat", hmat)
+        self._hamiltonian = lambda self, values: self.hmat
+
+    elif isinstance(block.generator, sympy.Basic):
+        self._hamiltonian = (
+            lambda self, values: values[self.param_names[1]].squeeze(3).permute(1, 2, 0)
+        )
+        # FIXME Why are we squeezing
+    else:
+
+        def _hamiltonian(self: PyQHamiltonianEvolution, values: dict[str, Tensor]) -> Tensor:
+            hmat = _block_to_tensor_embedded(
+                block.generator,  # type: ignore[arg-type]
+                values=values,
+                qubit_support=self.qubit_support,
+                use_full_support=False,
+                device=self.device,
+            )
+            return hmat.permute(1, 2, 0)
+
+        self._hamiltonian = _hamiltonian
+
+    self._time_evolution = lambda values: values[self.param_names[0]]
+    self._device: torch_device = (
+        self.hmat.device if hasattr(self, "hmat") else torch_device("cpu")
+    )
+    self._dtype: torch_dtype = self.hmat.dtype if hasattr(self, "hmat") else cdouble
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ dagger(values) + +

+ + +
+ +

Dagger of the evolved operator given the current parameter values.

+ +
+ Source code in qadence/backends/pyqtorch/convert_ops.py +
def dagger(self, values: dict[str, Tensor]) -> Tensor:
+    """Dagger of the evolved operator given the current parameter values."""
+    return _dagger(self.unitary(values))
+
+
+
+ +
+ +
+ + +

+ jacobian_generator(values) + +

+ + +
+ +

Approximate jacobian of the evolved operator with respect to generator parameter(s).

+ +
+ Source code in qadence/backends/pyqtorch/convert_ops.py +
def jacobian_generator(self, values: dict[str, Tensor]) -> Tensor:
+    """Approximate jacobian of the evolved operator with respect to generator parameter(s)."""
+    if len(self.param_names) > 2:
+        raise NotImplementedError(
+            "jacobian_generator does not support generators\
+                                    with more than 1 parameter."
+        )
+
+    def _generator(val: Tensor) -> Tensor:
+        val_copy = values.copy()
+        val_copy[self.param_names[1]] = val
+        hmat = _block_to_tensor_embedded(
+            self.block.generator,  # type: ignore[arg-type]
+            values=val_copy,
+            qubit_support=self.qubit_support,
+            use_full_support=False,
+            device=self.device,
+        )
+        return hmat.permute(1, 2, 0)
+
+    return finitediff(
+        lambda v: self._unitary(
+            time_evolution=self._time_evolution(values), hamiltonian=_generator(v)
+        ),
+        values[self.param_names[1]].reshape(-1, 1),
+        (0,),
+    )
+
+
+
+ +
+ +
+ + +

+ jacobian_time(values) + +

+ + +
+ +

Approximate jacobian of the evolved operator with respect to time evolution.

+ +
+ Source code in qadence/backends/pyqtorch/convert_ops.py +
def jacobian_time(self, values: dict[str, Tensor]) -> Tensor:
+    """Approximate jacobian of the evolved operator with respect to time evolution."""
+    return finitediff(
+        lambda t: self._unitary(time_evolution=t, hamiltonian=self._hamiltonian(self, values)),
+        values[self.param_names[0]].reshape(-1, 1),
+        (0,),
+    )
+
+
+
+ +
+ +
+ + +

+ unitary(values) + +

+ + +
+ +

The evolved operator given current parameter values for generator and time evolution.

+ +
+ Source code in qadence/backends/pyqtorch/convert_ops.py +
def unitary(self, values: dict[str, Tensor]) -> Tensor:
+    """The evolved operator given current parameter values for generator and time evolution."""
+    return self._unitary(self._hamiltonian(self, values), self._time_evolution(values))
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v1.7.4/api/blocks/index.html b/v1.7.4/api/blocks/index.html new file mode 100644 index 000000000..bbfdb2f3c --- /dev/null +++ b/v1.7.4/api/blocks/index.html @@ -0,0 +1,5280 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Block system - Qadence + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Block system

+ +

qadence offers a block-based system to construct quantum circuits in a flexible manner.

+ + +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ AbstractBlock(tag=None, __array_priority__=1000) + + + dataclass + + +

+ + +
+

+ Bases: ABC

+ + +

Base class for both primitive and composite blocks.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
ATTRIBUTEDESCRIPTION
name +
+

A human-readable name attached to the block type. Notice, this is +the same for all the class instances so it cannot be used for identifying +different blocks

+
+

+ + TYPE: + str + +

+
qubit_support +
+

The qubit support of the block expressed as +a tuple of integers

+
+

+ + TYPE: + tuple[int, ...] + +

+
tag +
+

A tag identifying a particular instance of the block which can +be used for identification and pretty printing

+
+

+ + TYPE: + str | None + +

+
eigenvalues +
+

The eigenvalues of the matrix representing the block. +This is used mainly for primitive blocks and it's needed for generalized parameter +shift rule computations. Currently unused.

+
+

+ + TYPE: + list[float] | None + +

+
+ + + + +
+ + + + + + + +
+ + + +

+ is_identity: bool + + + property + + +

+ + +
+ +

Identity predicate for blocks.

+
+ +
+ + + +
+ + +

+ n_qubits() + +

+ + +
+ +

The number of qubits in the whole system.

+

A block acting on qubit N would has at least n_qubits >= N + 1.

+ +
+ Source code in qadence/blocks/abstract.py +
47
+48
+49
+50
+51
+52
+53
@abstractproperty
+def n_qubits(self) -> int:
+    """The number of qubits in the whole system.
+
+    A block acting on qubit N would has at least n_qubits >= N + 1.
+    """
+    pass
+
+
+
+ +
+ +
+ + +

+ n_supports() + +

+ + +
+ +

The number of qubits the block is acting on.

+ +
+ Source code in qadence/blocks/abstract.py +
55
+56
+57
+58
@abstractproperty
+def n_supports(self) -> int:
+    """The number of qubits the block is acting on."""
+    pass
+
+
+
+ +
+ +
+ + +

+ qubit_support() + +

+ + +
+ +

The indices of the qubit(s) the block is acting on.

+

Qadence uses the ordering [0..,N-1] for qubits.

+ +
+ Source code in qadence/blocks/abstract.py +
39
+40
+41
+42
+43
+44
+45
@abstractproperty
+def qubit_support(self) -> Tuple[int, ...]:
+    """The indices of the qubit(s) the block is acting on.
+
+    Qadence uses the ordering [0..,N-1] for qubits.
+    """
+    pass
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +

Primitive blocks

+ + +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ ControlBlock(control, target_block) + +

+ + +
+

+ Bases: PrimitiveBlock

+ + +

The abstract ControlBlock.

+ +
+ Source code in qadence/blocks/primitive.py +
def __init__(self, control: tuple[int, ...], target_block: PrimitiveBlock) -> None:
+    self.control = control
+    self.blocks = (target_block,)
+    self.target = target_block.qubit_support
+
+    # using tuple expansion because some control operations could
+    # have multiple targets, e.g. CSWAP
+    super().__init__((*control, *self.target))  # target_block.qubit_support[0]))
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ ParametricBlock(qubit_support) + + + dataclass + + +

+ + +
+

+ Bases: PrimitiveBlock

+ + +

Parameterized primitive blocks.

+ +
+ Source code in qadence/blocks/primitive.py +
def __init__(self, qubit_support: tuple[int, ...]):
+    self._qubit_support = qubit_support
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ num_parameters() + + + abstractmethod + + +

+ + +
+ +

The number of parameters required by the block.

+

This is a class property since the number of parameters is defined +automatically before instantiating the operation. Also, this could +correspond to a larger number of actual user-facing parameters +since any parameter expression is allowed

+

Examples: +- RX operation has 1 parameter +- U operation has 3 parameters +- HamEvo has 2 parameters (generator and time evolution)

+ +
+ Source code in qadence/blocks/primitive.py +
@abstractmethod
+def num_parameters(cls) -> int:
+    """The number of parameters required by the block.
+
+    This is a class property since the number of parameters is defined
+    automatically before instantiating the operation. Also, this could
+    correspond to a larger number of actual user-facing parameters
+    since any parameter expression is allowed
+
+    Examples:
+    - RX operation has 1 parameter
+    - U operation has 3 parameters
+    - HamEvo has 2 parameters (generator and time evolution)
+    """
+    pass
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ ParametricControlBlock(control, target_block) + +

+ + +
+

+ Bases: ParametricBlock

+ + +

The abstract parametrized ControlBlock.

+ +
+ Source code in qadence/blocks/primitive.py +
def __init__(self, control: tuple[int, ...], target_block: ParametricBlock) -> None:
+    self.blocks = (target_block,)
+    self.control = control
+    self.parameters = target_block.parameters
+    super().__init__((*control, *target_block.qubit_support))
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ PrimitiveBlock(qubit_support) + +

+ + +
+

+ Bases: AbstractBlock

+ + +

Primitive blocks represent elementary unitary operations.

+

Examples are single/multi-qubit gates or Hamiltonian evolution. +See qadence.operations for a full list of +primitive blocks.

+ +
+ Source code in qadence/blocks/primitive.py +
def __init__(self, qubit_support: tuple[int, ...]):
+    self._qubit_support = qubit_support
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ digital_decomposition() + +

+ + +
+ +

Decomposition into purely digital gates.

+

This method returns a decomposition of the Block in a +combination of purely digital single-qubit and two-qubit +'gates', by manual/custom knowledge of how this can be done efficiently. +:return:

+ +
+ Source code in qadence/blocks/primitive.py +
43
+44
+45
+46
+47
+48
+49
+50
+51
def digital_decomposition(self) -> AbstractBlock:
+    """Decomposition into purely digital gates.
+
+    This method returns a decomposition of the Block in a
+    combination of purely digital single-qubit and two-qubit
+    'gates', by manual/custom knowledge of how this can be done efficiently.
+    :return:
+    """
+    return self
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ ProjectorBlock(ket, bra, qubit_support) + +

+ + +
+

+ Bases: PrimitiveBlock

+ + +

The abstract ProjectorBlock.

+ +

Arguments:

+
ket (str): The ket given as a bitstring.
+bra (str): The bra given as a bitstring.
+qubit_support (int | tuple[int]): The qubit_support of the block.
+
+ +
+ Source code in qadence/blocks/primitive.py +
def __init__(
+    self,
+    ket: str,
+    bra: str,
+    qubit_support: int | tuple[int, ...],
+) -> None:
+    """
+    Arguments:
+
+        ket (str): The ket given as a bitstring.
+        bra (str): The bra given as a bitstring.
+        qubit_support (int | tuple[int]): The qubit_support of the block.
+    """
+    if isinstance(qubit_support, int):
+        qubit_support = (qubit_support,)
+    if len(bra) != len(ket):
+        raise ValueError(
+            "Bra and ket must be bitstrings of same length in the 'Projector' definition."
+        )
+    elif len(bra) != len(qubit_support):
+        raise ValueError("Bra or ket must be of same length as the 'qubit_support'")
+    for wf in [bra, ket]:
+        if not all(int(item) == 0 or int(item) == 1 for item in wf):
+            raise ValueError(
+                "All qubits must be either in the '0' or '1' state"
+                " in the 'ProjectorBlock' definition."
+            )
+
+    self.ket = ket
+    self.bra = bra
+    super().__init__(qubit_support)
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ ScaleBlock(block, parameter) + +

+ + +
+

+ Bases: ParametricBlock

+ + +

Scale blocks are created when multiplying a block by a number or parameter.

+

Example: +

from qadence import X
+
+print(X(0) * 2)
+
+
+ +
[mul: 2] 
+└── X(0)
+
+ +

+ +
+ Source code in qadence/blocks/primitive.py +
def __init__(self, block: AbstractBlock, parameter: Any):
+    self.block = block
+    # TODO: more meaningful name like `scale`?
+    self.parameters = (
+        parameter if isinstance(parameter, ParamMap) else ParamMap(parameter=parameter)
+    )
+    super().__init__(block.qubit_support)
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ TimeEvolutionBlock(qubit_support) + + + dataclass + + +

+ + +
+

+ Bases: ParametricBlock

+ + +

Simple time evolution block with time-independent Hamiltonian.

+

This class is just a convenience class which is used to label +blocks which contains simple time evolution with time-independent +Hamiltonian operators

+ +
+ Source code in qadence/blocks/primitive.py +
def __init__(self, qubit_support: tuple[int, ...]):
+    self._qubit_support = qubit_support
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + +
+ +
+ +

Analog blocks

+

To learn how to use analog blocks and how to mix digital & analog blocks, check out the +digital-analog section of the documentation.

+

Examples on how to use digital-analog blocks can be found in the +*examples folder of the qadence repo:

+
    +
  • Fit a simple sinus: examples/digital-analog/fit-sin.py
  • +
  • Solve a QUBO: examples/digital-analog/qubo.py
  • +
+ + +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ AnalogChain(blocks) + + + dataclass + + +

+ + +
+

+ Bases: AnalogComposite

+ + +

A chain of analog blocks.

+

Needed because analog blocks require +stricter validation than the general ChainBlock.

+

AnalogChains can only be constructed from AnalogKron blocks or +globally supported, primitive, analog blocks (like InteractionBlocks and +ConstantAnalogRotations).

+

Automatically constructed by the chain +function if only analog blocks are given.

+

Example: +

from qadence import X, chain, AnalogInteraction
+
+b = chain(AnalogInteraction(200), AnalogInteraction(200))
+print(type(b))  # this is an `AnalogChain`
+
+b = chain(X(0), AnalogInteraction(200))
+print(type(b))  # this is a general `ChainBlock`
+
+
+ +
<class 'qadence.blocks.analog.AnalogChain'>
+<class 'qadence.blocks.composite.ChainBlock'>
+
+ +

+ +
+ Source code in qadence/blocks/analog.py +
def __init__(self, blocks: Tuple[AnalogBlock, ...]):
+    """A chain of analog blocks.
+
+    Needed because analog blocks require
+    stricter validation than the general `ChainBlock`.
+
+    `AnalogChain`s can only be constructed from `AnalogKron` blocks or
+    _**globally supported**_, primitive, analog blocks (like `InteractionBlock`s and
+    `ConstantAnalogRotation`s).
+
+    Automatically constructed by the [`chain`][qadence.blocks.utils.chain]
+    function if only analog blocks are given.
+
+    Example:
+    ```python exec="on" source="material-block" result="json"
+    from qadence import X, chain, AnalogInteraction
+
+    b = chain(AnalogInteraction(200), AnalogInteraction(200))
+    print(type(b))  # this is an `AnalogChain`
+
+    b = chain(X(0), AnalogInteraction(200))
+    print(type(b))  # this is a general `ChainBlock`
+    ```
+    """
+    for b in blocks:
+        if not (isinstance(b, AnalogKron) or b.qubit_support.is_global):
+            raise ValueError("Only KronBlocks or global blocks can be chain'ed.")
+    self.blocks = blocks
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ AnalogKron(blocks, interaction=Interaction.NN) + + + dataclass + + +

+ + +
+

+ Bases: AnalogComposite

+ + +

Stack analog blocks vertically (i.e. in time).

+

Needed because analog require +stricter validation than the general KronBlock.

+

AnalogKrons can only be constructed from non-global, analog blocks +with the same duration.

+ +
+ Source code in qadence/blocks/analog.py +
def __init__(self, blocks: Tuple[AnalogBlock, ...], interaction: Interaction = Interaction.NN):
+    """Stack analog blocks vertically (i.e. in time).
+
+    Needed because analog require
+    stricter validation than the general `KronBlock`.
+
+    `AnalogKron`s can only be constructed from _**non-global**_, analog blocks
+    with the _**same duration**_.
+    """
+    if len(blocks) == 0:
+        raise NotImplementedError("Empty KronBlocks not supported")
+
+    self.blocks = blocks
+    self.interaction = interaction
+
+    qubit_support = QubitSupport()
+    duration = blocks[0].duration
+    for b in blocks:
+        if not isinstance(b, AnalogBlock):
+            raise ValueError("Can only kron `AnalgoBlock`s with other `AnalgoBlock`s.")
+
+        if b.qubit_support == QubitSupport("global"):
+            raise ValueError("Blocks with global support cannot be kron'ed.")
+
+        if not qubit_support.is_disjoint(b.qubit_support):
+            raise ValueError("Make sure blocks act on distinct qubits!")
+
+        if not np.isclose(evaluate(duration), evaluate(b.duration)):
+            raise ValueError("Kron'ed blocks have to have same duration.")
+
+        qubit_support += b.qubit_support
+
+    self.blocks = blocks
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ ConstantAnalogRotation(tag=None, __array_priority__=1000, _eigenvalues_generator=None, parameters=ParamMap(alpha=0.0, duration=1000.0, omega=0.0, delta=0.0, phase=0.0), qubit_support=QubitSupport('global'), add_pattern=True) + + + dataclass + + +

+ + +
+

+ Bases: AnalogBlock

+ + +

Implements a constant analog rotation with interaction dictated by the chosen Hamiltonian.

+
H/h = ∑ᵢ(Ω/2 cos(φ)*Xᵢ - sin(φ)*Yᵢ - δnᵢ) + Hᵢₙₜ.
+
+

To construct this block you can use of the following convenience wrappers: +- The general rotation operation AnalogRot +- Shorthands for rotatins around an axis: + AnalogRX, + AnalogRY, + AnalogRZ

+

WARNING: do not use ConstantAnalogRotation with alpha as differentiable parameter - use +the convenience wrappers mentioned above.

+ + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ InteractionBlock(tag=None, __array_priority__=1000, _eigenvalues_generator=None, parameters=ParamMap(duration=1000.0), qubit_support=QubitSupport('global'), add_pattern=True) + + + dataclass + + +

+ + +
+

+ Bases: AnalogBlock

+ + +

Free-evolution for the Hamiltonian interaction term of a register of qubits.

+

In real interacting quantum devices, it means letting the system evolve freely according +to the time-dependent Schrodinger equation. With emulators, this block is translated to an +appropriate interaction Hamiltonian, for example, an Ising interaction

+
Hᵢₙₜ = ∑ᵢⱼ C₆/rᵢⱼ⁶ nᵢnⱼ
+
+

or an XY-interaction

+
Hᵢₙₜ = ∑ᵢⱼ C₃/rⱼⱼ³ (XᵢXⱼ + ZᵢZⱼ)
+
+

with nᵢ = (1-Zᵢ)/2.

+

To construct, use the AnalogInteraction function.

+ + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + +
+ +
+ +

Composite blocks

+ + +
+ + +

+ chain(*args) + +

+ + +
+ +

Chain blocks sequentially.

+

On digital backends this can be interpreted +loosely as a matrix mutliplication of blocks. In the analog case it chains +blocks in time.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
*args +
+

Blocks to chain. Can also be a generator.

+
+

+ + TYPE: + Union[AbstractBlock, Generator, List[AbstractBlock]] + + + DEFAULT: + () + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + ChainBlock + + +
+

ChainBlock

+
+
+

Example: +

from qadence import X, Y, chain
+
+b = chain(X(0), Y(0))
+
+# or use a generator
+b = chain(X(i) for i in range(3))
+print(b)
+
+
+ +
ChainBlock(0,1,2)
+├── X(0)
+├── X(1)
+└── X(2)
+
+ +

+ +
+ Source code in qadence/blocks/utils.py +
51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
def chain(*args: Union[AbstractBlock, Generator, List[AbstractBlock]]) -> ChainBlock:
+    """Chain blocks sequentially.
+
+    On digital backends this can be interpreted
+    loosely as a matrix mutliplication of blocks. In the analog case it chains
+    blocks in time.
+
+    Arguments:
+        *args: Blocks to chain. Can also be a generator.
+
+    Returns:
+        ChainBlock
+
+    Example:
+    ```python exec="on" source="material-block" result="json"
+    from qadence import X, Y, chain
+
+    b = chain(X(0), Y(0))
+
+    # or use a generator
+    b = chain(X(i) for i in range(3))
+    print(b)
+    ```
+    """
+    # ugly hack to use `AnalogChain` if we are dealing only with analog blocks
+    if len(args) and all(
+        isinstance(a, AnalogBlock) or isinstance(a, AnalogComposite) for a in args
+    ):
+        return analog_chain(*args)  # type: ignore[return-value,arg-type]
+    return _construct(ChainBlock, args)
+
+
+
+ +
+ +
+ + +

+ kron(*args) + +

+ + +
+ +

Stack blocks vertically.

+

On digital backends this can be intepreted +loosely as a kronecker product of blocks. In the analog case it executes +blocks parallel in time.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
*args +
+

Blocks to kron. Can also be a generator.

+
+

+ + TYPE: + Union[AbstractBlock, Generator] + + + DEFAULT: + () + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + KronBlock + + +
+

KronBlock

+
+
+

Example: +

from qadence import X, Y, kron
+
+b = kron(X(0), Y(1))
+
+# or use a generator
+b = kron(X(i) for i in range(3))
+print(b)
+
+
+ +
KronBlock(0,1,2)
+├── X(0)
+├── X(1)
+└── X(2)
+
+ +

+ +
+ Source code in qadence/blocks/utils.py +
def kron(*args: Union[AbstractBlock, Generator]) -> KronBlock:
+    """Stack blocks vertically.
+
+    On digital backends this can be intepreted
+    loosely as a kronecker product of blocks. In the analog case it executes
+    blocks parallel in time.
+
+    Arguments:
+        *args: Blocks to kron. Can also be a generator.
+
+    Returns:
+        KronBlock
+
+    Example:
+    ```python exec="on" source="material-block" result="json"
+    from qadence import X, Y, kron
+
+    b = kron(X(0), Y(1))
+
+    # or use a generator
+    b = kron(X(i) for i in range(3))
+    print(b)
+    ```
+    """
+    # ugly hack to use `AnalogKron` if we are dealing only with analog blocks
+    if len(args) and all(
+        isinstance(a, AnalogBlock) or isinstance(a, AnalogComposite) for a in args
+    ):
+        return analog_kron(*args)  # type: ignore[return-value,arg-type]
+    return _construct(KronBlock, args)
+
+
+
+ +
+ +
+ + +

+ add(*args) + +

+ + +
+ +

Sums blocks.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
*args +
+

Blocks to add. Can also be a generator.

+
+

+ + TYPE: + Union[AbstractBlock, Generator] + + + DEFAULT: + () + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + AddBlock + + +
+

AddBlock

+
+
+

Example: +

from qadence import X, Y, add
+
+b = add(X(0), Y(0))
+
+# or use a generator
+b = add(X(i) for i in range(3))
+print(b)
+
+
+ +
AddBlock(0,1,2)
+├── X(0)
+├── X(1)
+└── X(2)
+
+ +

+ +
+ Source code in qadence/blocks/utils.py +
def add(*args: Union[AbstractBlock, Generator]) -> AddBlock:
+    """Sums blocks.
+
+    Arguments:
+        *args: Blocks to add. Can also be a generator.
+
+    Returns:
+        AddBlock
+
+    Example:
+    ```python exec="on" source="material-block" result="json"
+    from qadence import X, Y, add
+
+    b = add(X(0), Y(0))
+
+    # or use a generator
+    b = add(X(i) for i in range(3))
+    print(b)
+    ```
+    """
+    return _construct(AddBlock, args)
+
+
+
+ +
+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ AddBlock(blocks) + +

+ + +
+

+ Bases: CompositeBlock

+ + +

Adds blocks.

+

Constructed via add.

+ +
+ Source code in qadence/blocks/composite.py +
def __init__(self, blocks: Tuple[AbstractBlock, ...]):
+    self.blocks = blocks
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ ChainBlock(blocks) + +

+ + +
+

+ Bases: CompositeBlock

+ + +

Chains blocks sequentially.

+

Constructed via chain

+ +
+ Source code in qadence/blocks/composite.py +
def __init__(self, blocks: Tuple[AbstractBlock, ...]):
+    self.blocks = blocks
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ CompositeBlock(tag=None, __array_priority__=1000) + + + dataclass + + +

+ + +
+

+ Bases: AbstractBlock

+ + +

Block which composes multiple blocks into one larger block (which can again be composed).

+

Composite blocks are constructed via chain, +kron, and add.

+ + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ KronBlock(blocks) + +

+ + +
+

+ Bases: CompositeBlock

+ + +

Stacks blocks horizontally.

+

Constructed via kron.

+ +
+ Source code in qadence/blocks/composite.py +
def __init__(self, blocks: Tuple[AbstractBlock, ...]):
+    if len(blocks) == 0:
+        raise NotImplementedError("Empty KronBlocks not supported")
+
+    qubit_support = QubitSupport()
+    for b in blocks:
+        assert (
+            QubitSupportType.GLOBAL,
+        ) != b.qubit_support, "Blocks with global support cannot be kron'ed."
+        assert qubit_support.is_disjoint(
+            b.qubit_support
+        ), "Make sure blocks act on distinct qubits!"
+        qubit_support += b.qubit_support
+
+    self.blocks = blocks
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + +
+ +
+ +

Converting blocks to matrices

+ + +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ block_to_tensor(block, values={}, qubit_support=None, use_full_support=True, tensor_type=TensorType.DENSE, endianness=Endianness.BIG, device=None) + +

+ + +
+ +

Convert a block into a torch tensor.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
block +
+

The block to convert.

+
+

+ + TYPE: + AbstractBlock + +

+
values +
+

A optional dict with values for parameters.

+
+

+ + TYPE: + dict + + + DEFAULT: + {} + +

+
qubit_support +
+

The qubit_support of the block.

+
+

+ + TYPE: + tuple + + + DEFAULT: + None + +

+
use_full_support +
+

True infers the total number of qubits.

+
+

+ + TYPE: + bool + + + DEFAULT: + True + +

+
tensor_type +
+

the target tensor type.

+
+

+ + TYPE: + TensorType + + + DEFAULT: + DENSE + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + Tensor + + +
+

A torch.Tensor.

+
+
+

Examples: +

from qadence import hea, hamiltonian_factory, Z, block_to_tensor
+
+block = hea(2,2)
+print(block_to_tensor(block))
+
+# In case you have a diagonal observable, you can use
+obs = hamiltonian_factory(2, detuning = Z)
+print(block_to_tensor(obs, tensor_type="SparseDiagonal"))
+
+
+ +
tensor([[[ 0.1392+0.5072j, -0.4330-0.5106j, -0.3510-0.0655j, -0.3807+0.0524j],
+         [ 0.0809-0.2256j,  0.3804+0.2862j, -0.6871-0.2574j, -0.3869+0.1668j],
+         [-0.0577-0.7394j, -0.2771-0.2983j,  0.0811+0.1346j, -0.4272-0.2774j],
+         [-0.0456-0.3372j, -0.2492-0.3119j, -0.2042-0.5174j,  0.5576+0.3232j]]],
+       grad_fn=<UnsafeViewBackward0>)
+tensor(indices=tensor([[0, 3],
+                       [0, 3]]),
+       values=tensor([ 2.+0.j, -2.+0.j]),
+       size=(4, 4), nnz=2, layout=torch.sparse_coo)
+
+ +

+ +
+ Source code in qadence/blocks/block_to_tensor.py +
def block_to_tensor(
+    block: AbstractBlock,
+    values: dict[str, TNumber | torch.Tensor] = {},
+    qubit_support: tuple | None = None,
+    use_full_support: bool = True,
+    tensor_type: TensorType = TensorType.DENSE,
+    endianness: Endianness = Endianness.BIG,
+    device: torch.device = None,
+) -> torch.Tensor:
+    """
+    Convert a block into a torch tensor.
+
+    Arguments:
+        block (AbstractBlock): The block to convert.
+        values (dict): A optional dict with values for parameters.
+        qubit_support (tuple): The qubit_support of the block.
+        use_full_support (bool): True infers the total number of qubits.
+        tensor_type (TensorType): the target tensor type.
+
+    Returns:
+        A torch.Tensor.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    from qadence import hea, hamiltonian_factory, Z, block_to_tensor
+
+    block = hea(2,2)
+    print(block_to_tensor(block))
+
+    # In case you have a diagonal observable, you can use
+    obs = hamiltonian_factory(2, detuning = Z)
+    print(block_to_tensor(obs, tensor_type="SparseDiagonal"))
+    ```
+    """
+
+    # FIXME: default use_full_support to False. In general, it would
+    # be more efficient to do that, and make sure that computations such
+    # as observables only do the matmul of the size of the qubit support.
+
+    if tensor_type == TensorType.DENSE:
+        from qadence.blocks import embedding
+
+        (ps, embed) = embedding(block)
+        return _block_to_tensor_embedded(
+            block,
+            embed(ps, values),
+            qubit_support,
+            use_full_support,
+            endianness=endianness,
+            device=device,
+        )
+
+    elif tensor_type == TensorType.SPARSEDIAGONAL:
+        t = block_to_diagonal(block, endianness=endianness)
+        indices, values, size = torch.nonzero(t), t[t != 0], len(t)
+        indices = torch.stack((indices.flatten(), indices.flatten()))
+        return torch.sparse_coo_tensor(indices, values, (size, size))
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v1.7.4/api/constructors/index.html b/v1.7.4/api/constructors/index.html new file mode 100644 index 000000000..14a568aad --- /dev/null +++ b/v1.7.4/api/constructors/index.html @@ -0,0 +1,7433 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Constructors - Qadence + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Constructors for common quantum circuits

+ + +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ exp_fourier_feature_map(n_qubits, support=None, param='x', feature_range=None) + +

+ + +
+ +

Exponential fourier feature map.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

number of qubits in the feature

+
+

+ + TYPE: + int + +

+
support +
+

qubit support

+
+

+ + TYPE: + tuple[int, ...] + + + DEFAULT: + None + +

+
param +
+

name of feature Parameter

+
+

+ + TYPE: + str + + + DEFAULT: + 'x' + +

+
feature_range +
+

min and max value of the feature, as floats in a Tuple

+
+

+ + TYPE: + tuple[float, float] + + + DEFAULT: + None + +

+
+ +
+ Source code in qadence/constructors/feature_maps.py +
def exp_fourier_feature_map(
+    n_qubits: int,
+    support: tuple[int, ...] = None,
+    param: str = "x",
+    feature_range: tuple[float, float] = None,
+) -> AbstractBlock:
+    """
+    Exponential fourier feature map.
+
+    Args:
+        n_qubits: number of qubits in the feature
+        support: qubit support
+        param: name of feature `Parameter`
+        feature_range: min and max value of the feature, as floats in a Tuple
+    """
+
+    if feature_range is None:
+        feature_range = (0.0, 2.0**n_qubits)
+
+    support = tuple(range(n_qubits)) if support is None else support
+    hlayer = kron(H(qubit) for qubit in support)
+    rlayer = feature_map(
+        n_qubits,
+        support=support,
+        param=param,
+        op=RZ,
+        fm_type=BasisSet.FOURIER,
+        reupload_scaling=ReuploadScaling.EXP,
+        feature_range=feature_range,
+        target_range=(0.0, 2 * PI),
+    )
+    rlayer.tag = None
+    return tag(chain(hlayer, rlayer), f"ExpFourierFM({param})")
+
+
+
+ +
+ +
+ + +

+ feature_map(n_qubits, support=None, param='phi', op=RX, fm_type=BasisSet.FOURIER, reupload_scaling=ReuploadScaling.CONSTANT, feature_range=None, target_range=None, multiplier=None, param_prefix=None) + +

+ + +
+ +

Construct a feature map of a given type.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

Number of qubits the feature map covers. Results in support=range(n_qubits).

+
+

+ + TYPE: + int + +

+
support +
+

Puts one feature-encoding rotation gate on every qubit in support. n_qubits in +this case specifies the total overall qubits of the circuit, which may be wider than the +support itself, but not narrower.

+
+

+ + TYPE: + tuple[int, ...] | None + + + DEFAULT: + None + +

+
param +
+

Parameter of the feature map; you can pass a string or Parameter; +it will be set as non-trainable (FeatureParameter) regardless.

+
+

+ + TYPE: + Parameter | str + + + DEFAULT: + 'phi' + +

+
op +
+

Rotation operation of the feature map; choose from RX, RY, RZ or PHASE.

+
+

+ + TYPE: + RotationTypes + + + DEFAULT: + RX + +

+
fm_type +
+

Basis set for data encoding; choose from BasisSet.FOURIER for Fourier +encoding, or BasisSet.CHEBYSHEV for Chebyshev polynomials of the first kind.

+
+

+ + TYPE: + BasisSet | Callable | str + + + DEFAULT: + FOURIER + +

+
reupload_scaling +
+

how the feature map scales the data that is re-uploaded for each qubit. +choose from ReuploadScaling enumeration or provide your own function with a single +int as input and int or float as output.

+
+

+ + TYPE: + ReuploadScaling | Callable | str + + + DEFAULT: + CONSTANT + +

+
feature_range +
+

range of data that the input data provided comes from. Used to map input data +to the correct domain of the feature-encoding function.

+
+

+ + TYPE: + tuple[float, float] | None + + + DEFAULT: + None + +

+
target_range +
+

range of data the data encoder assumes as the natural range. For example, +in Chebyshev polynomials it is (-1, 1), while for Fourier it may be chosen as (0, 2*PI). +Used to map data to the correct domain of the feature-encoding function.

+
+

+ + TYPE: + tuple[float, float] | None + + + DEFAULT: + None + +

+
multiplier +
+

overall multiplier; this is useful for reuploading the feature map serially with +different scalings; can be a number or parameter/expression.

+
+

+ + TYPE: + Parameter | TParameter | None + + + DEFAULT: + None + +

+
param_prefix +
+

string prefix to create trainable parameters multiplying the feature parameter +inside the feature-encoding function. Note that currently this does not take into +account the domain of the feature-encoding function.

+
+

+ + TYPE: + str | None + + + DEFAULT: + None + +

+
+

Example: +

from qadence import feature_map, BasisSet, ReuploadScaling
+
+fm = feature_map(3, fm_type=BasisSet.FOURIER)
+print(f"{fm = }")
+
+fm = feature_map(3, fm_type=BasisSet.CHEBYSHEV)
+print(f"{fm = }")
+
+fm = feature_map(3, fm_type=BasisSet.FOURIER, reupload_scaling = ReuploadScaling.TOWER)
+print(f"{fm = }")
+
+
+ +
fm = KronBlock(0,1,2) [tag: Constant Fourier FM]
+├── RX(0) [params: ['phi']]
+├── RX(1) [params: ['phi']]
+└── RX(2) [params: ['phi']]
+fm = KronBlock(0,1,2) [tag: Constant Chebyshev FM]
+├── RX(0) [params: ['acos(phi)']]
+├── RX(1) [params: ['acos(phi)']]
+└── RX(2) [params: ['acos(phi)']]
+fm = KronBlock(0,1,2) [tag: Tower Fourier FM]
+├── RX(0) [params: ['1_0*phi']]
+├── RX(1) [params: ['2_0*phi']]
+└── RX(2) [params: ['3_0*phi']]
+
+ +

+ +
+ Source code in qadence/constructors/feature_maps.py +
def feature_map(
+    n_qubits: int,
+    support: tuple[int, ...] | None = None,
+    param: Parameter | str = "phi",
+    op: RotationTypes = RX,
+    fm_type: BasisSet | Callable | str = BasisSet.FOURIER,
+    reupload_scaling: ReuploadScaling | Callable | str = ReuploadScaling.CONSTANT,
+    feature_range: tuple[float, float] | None = None,
+    target_range: tuple[float, float] | None = None,
+    multiplier: Parameter | TParameter | None = None,
+    param_prefix: str | None = None,
+) -> KronBlock:
+    """Construct a feature map of a given type.
+
+    Arguments:
+        n_qubits: Number of qubits the feature map covers. Results in `support=range(n_qubits)`.
+        support: Puts one feature-encoding rotation gate on every qubit in `support`. n_qubits in
+            this case specifies the total overall qubits of the circuit, which may be wider than the
+            support itself, but not narrower.
+        param: Parameter of the feature map; you can pass a string or Parameter;
+            it will be set as non-trainable (FeatureParameter) regardless.
+        op: Rotation operation of the feature map; choose from RX, RY, RZ or PHASE.
+        fm_type: Basis set for data encoding; choose from `BasisSet.FOURIER` for Fourier
+            encoding, or `BasisSet.CHEBYSHEV` for Chebyshev polynomials of the first kind.
+        reupload_scaling: how the feature map scales the data that is re-uploaded for each qubit.
+            choose from `ReuploadScaling` enumeration or provide your own function with a single
+            int as input and int or float as output.
+        feature_range: range of data that the input data provided comes from. Used to map input data
+            to the correct domain of the feature-encoding function.
+        target_range: range of data the data encoder assumes as the natural range. For example,
+            in Chebyshev polynomials it is (-1, 1), while for Fourier it may be chosen as (0, 2*PI).
+            Used to map data to the correct domain of the feature-encoding function.
+        multiplier: overall multiplier; this is useful for reuploading the feature map serially with
+            different scalings; can be a number or parameter/expression.
+        param_prefix: string prefix to create trainable parameters multiplying the feature parameter
+            inside the feature-encoding function. Note that currently this does not take into
+            account the domain of the feature-encoding function.
+
+    Example:
+    ```python exec="on" source="material-block" result="json"
+    from qadence import feature_map, BasisSet, ReuploadScaling
+
+    fm = feature_map(3, fm_type=BasisSet.FOURIER)
+    print(f"{fm = }")
+
+    fm = feature_map(3, fm_type=BasisSet.CHEBYSHEV)
+    print(f"{fm = }")
+
+    fm = feature_map(3, fm_type=BasisSet.FOURIER, reupload_scaling = ReuploadScaling.TOWER)
+    print(f"{fm = }")
+    ```
+    """
+
+    # Process input
+    if support is None:
+        support = tuple(range(n_qubits))
+    elif len(support) != n_qubits:
+        raise ValueError("Wrong qubit support supplied")
+
+    if op not in ROTATIONS:
+        raise ValueError(
+            f"Operation {op} not supported. "
+            f"Please provide one from {[rot.__name__ for rot in ROTATIONS]}."
+        )
+
+    scaled_fparam = fm_parameter_scaling(
+        fm_type, param, feature_range=feature_range, target_range=target_range
+    )
+
+    transform_func = fm_parameter_func(fm_type)
+
+    basis_tag = fm_type.value if isinstance(fm_type, BasisSet) else str(fm_type)
+    rs_func, rs_tag = fm_reupload_scaling_fn(reupload_scaling)
+
+    # Set overall multiplier
+    multiplier = 1 if multiplier is None else Parameter(multiplier)
+
+    # Build feature map
+    op_list = []
+    fparam = scaled_fparam
+    for i, qubit in enumerate(support):
+        if param_prefix is not None:
+            train_param = VariationalParameter(param_prefix + f"_{i}")
+            fparam = train_param * scaled_fparam
+        op_list.append(op(qubit, multiplier * rs_func(i) * transform_func(fparam)))
+    fm = kron(*op_list)
+
+    fm.tag = rs_tag + " " + basis_tag + " FM"
+
+    return fm
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ hea(n_qubits, depth=1, param_prefix='theta', support=None, strategy=Strategy.DIGITAL, **strategy_args) + +

+ + +
+ +

Factory function for the Hardware Efficient Ansatz (HEA).

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

number of qubits in the block

+
+

+ + TYPE: + int + +

+
depth +
+

number of layers of the HEA

+
+

+ + TYPE: + int + + + DEFAULT: + 1 + +

+
param_prefix +
+

the base name of the variational parameters

+
+

+ + TYPE: + str + + + DEFAULT: + 'theta' + +

+
support +
+

qubit indexes where the HEA is applied

+
+

+ + TYPE: + tuple[int, ...] + + + DEFAULT: + None + +

+
strategy +
+

Strategy.Digital or Strategy.DigitalAnalog

+
+

+ + TYPE: + Strategy + + + DEFAULT: + DIGITAL + +

+
**strategy_args +
+

see below

+
+

+ + TYPE: + Any + + + DEFAULT: + {} + +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
operations +
+

list of operations to cycle through in the +digital single-qubit rotations of each layer. Valid for +Digital and DigitalAnalog HEA.

+
+

+ + TYPE: + list + +

+
periodic +
+

if the qubits should be linked periodically. +periodic=False is not supported in emu-c. Valid for only +for Digital HEA.

+
+

+ + TYPE: + bool + +

+
entangler +
+
    +
  • Digital: 2-qubit entangling operation. Supports CNOT, CZ, +CRX, CRY, CRZ, CPHASE. Controlled rotations will have variational +parameters on the rotation angles.
  • +
  • DigitaAnalog | Analog: Hamiltonian generator for the +analog entangling layer. Defaults to global ZZ Hamiltonian. +Time parameter is considered variational.
  • +
+
+

+ + TYPE: + AbstractBlock + +

+
+

Examples: +

from qadence import RZ, RX
+from qadence import hea
+
+# create the circuit
+n_qubits, depth = 2, 4
+ansatz = hea(
+    n_qubits=n_qubits,
+    depth=depth,
+    strategy="sDAQC",
+    operations=[RZ,RX,RZ]
+)
+
+
+ +

+
+ +

+ +
+ Source code in qadence/constructors/ansatze.py +
15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
+85
+86
+87
+88
def hea(
+    n_qubits: int,
+    depth: int = 1,
+    param_prefix: str = "theta",
+    support: tuple[int, ...] = None,
+    strategy: Strategy = Strategy.DIGITAL,
+    **strategy_args: Any,
+) -> AbstractBlock:
+    """
+    Factory function for the Hardware Efficient Ansatz (HEA).
+
+    Args:
+        n_qubits: number of qubits in the block
+        depth: number of layers of the HEA
+        param_prefix: the base name of the variational parameters
+        support: qubit indexes where the HEA is applied
+        strategy: Strategy.Digital or Strategy.DigitalAnalog
+        **strategy_args: see below
+
+    Keyword Arguments:
+        operations (list): list of operations to cycle through in the
+            digital single-qubit rotations of each layer. Valid for
+            Digital and DigitalAnalog HEA.
+        periodic (bool): if the qubits should be linked periodically.
+            periodic=False is not supported in emu-c. Valid for only
+            for Digital HEA.
+        entangler (AbstractBlock):
+            - Digital: 2-qubit entangling operation. Supports CNOT, CZ,
+            CRX, CRY, CRZ, CPHASE. Controlled rotations will have variational
+            parameters on the rotation angles.
+            - DigitaAnalog | Analog: Hamiltonian generator for the
+            analog entangling layer. Defaults to global ZZ Hamiltonian.
+            Time parameter is considered variational.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    from qadence import RZ, RX
+    from qadence import hea
+
+    # create the circuit
+    n_qubits, depth = 2, 4
+    ansatz = hea(
+        n_qubits=n_qubits,
+        depth=depth,
+        strategy="sDAQC",
+        operations=[RZ,RX,RZ]
+    )
+    ```
+    """
+
+    if support is None:
+        support = tuple(range(n_qubits))
+
+    hea_func_dict = {
+        Strategy.DIGITAL: hea_digital,
+        Strategy.SDAQC: hea_sDAQC,
+        Strategy.BDAQC: hea_bDAQC,
+        Strategy.ANALOG: hea_analog,
+    }
+
+    try:
+        hea_func = hea_func_dict[strategy]
+    except KeyError:
+        raise KeyError(f"Strategy {strategy} not recognized.")
+
+    hea_block: AbstractBlock = hea_func(
+        n_qubits=n_qubits,
+        depth=depth,
+        param_prefix=param_prefix,
+        support=support,
+        **strategy_args,
+    )  # type: ignore
+
+    return hea_block
+
+
+
+ +
+ +
+ + +

+ hea_digital(n_qubits, depth=1, param_prefix='theta', periodic=False, operations=[RX, RY, RX], support=None, entangler=CNOT) + +

+ + +
+ +

Construct the Digital Hardware Efficient Ansatz (HEA).

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

number of qubits in the block.

+
+

+ + TYPE: + int + +

+
depth +
+

number of layers of the HEA.

+
+

+ + TYPE: + int + + + DEFAULT: + 1 + +

+
param_prefix +
+

the base name of the variational parameters

+
+

+ + TYPE: + str + + + DEFAULT: + 'theta' + +

+
periodic +
+

if the qubits should be linked periodically. +periodic=False is not supported in emu-c.

+
+

+ + TYPE: + bool + + + DEFAULT: + False + +

+
operations +
+

list of operations to cycle through in the +digital single-qubit rotations of each layer.

+
+

+ + TYPE: + list + + + DEFAULT: + [RX, RY, RX] + +

+
support +
+

qubit indexes where the HEA is applied.

+
+

+ + TYPE: + tuple + + + DEFAULT: + None + +

+
entangler +
+

2-qubit entangling operation. +Supports CNOT, CZ, CRX, CRY, CRZ. Controlld rotations +will have variational parameters on the rotation angles.

+
+

+ + TYPE: + AbstractBlock + + + DEFAULT: + CNOT + +

+
+ +
+ Source code in qadence/constructors/ansatze.py +
def hea_digital(
+    n_qubits: int,
+    depth: int = 1,
+    param_prefix: str = "theta",
+    periodic: bool = False,
+    operations: list[type[AbstractBlock]] = [RX, RY, RX],
+    support: tuple[int, ...] = None,
+    entangler: Type[DigitalEntanglers] = CNOT,
+) -> AbstractBlock:
+    """
+    Construct the Digital Hardware Efficient Ansatz (HEA).
+
+    Args:
+        n_qubits (int): number of qubits in the block.
+        depth (int): number of layers of the HEA.
+        param_prefix (str): the base name of the variational parameters
+        periodic (bool): if the qubits should be linked periodically.
+            periodic=False is not supported in emu-c.
+        operations (list): list of operations to cycle through in the
+            digital single-qubit rotations of each layer.
+        support (tuple): qubit indexes where the HEA is applied.
+        entangler (AbstractBlock): 2-qubit entangling operation.
+            Supports CNOT, CZ, CRX, CRY, CRZ. Controlld rotations
+            will have variational parameters on the rotation angles.
+    """
+    try:
+        if entangler not in [CNOT, CZ, CRX, CRY, CRZ, CPHASE]:
+            raise ValueError(
+                "Please provide a valid two-qubit entangler operation for digital HEA."
+            )
+    except TypeError:
+        raise ValueError("Please provide a valid two-qubit entangler operation for digital HEA.")
+
+    rot_list = _rotations_digital(
+        n_qubits=n_qubits,
+        depth=depth,
+        param_prefix=param_prefix,
+        support=support,
+        operations=operations,
+    )
+
+    ent_list = _entanglers_digital(
+        n_qubits=n_qubits,
+        depth=depth,
+        param_prefix=param_prefix,
+        support=support,
+        periodic=periodic,
+        entangler=entangler,
+    )
+
+    layers = []
+    for d in range(depth):
+        layers.append(rot_list[d])
+        layers.append(ent_list[d])
+    return tag(chain(*layers), "HEA")
+
+
+
+ +
+ +
+ + +

+ hea_sDAQC(n_qubits, depth=1, param_prefix='theta', operations=[RX, RY, RX], support=None, entangler=None) + +

+ + +
+ +

Construct the Hardware Efficient Ansatz (HEA) with analog entangling layers.

+

It uses step-wise digital-analog computation.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

number of qubits in the block.

+
+

+ + TYPE: + int + +

+
depth +
+

number of layers of the HEA.

+
+

+ + TYPE: + int + + + DEFAULT: + 1 + +

+
param_prefix +
+

the base name of the variational parameters

+
+

+ + TYPE: + str + + + DEFAULT: + 'theta' + +

+
operations +
+

list of operations to cycle through in the +digital single-qubit rotations of each layer.

+
+

+ + TYPE: + list + + + DEFAULT: + [RX, RY, RX] + +

+
support +
+

qubit indexes where the HEA is applied.

+
+

+ + TYPE: + tuple + + + DEFAULT: + None + +

+
entangler +
+

Hamiltonian generator for the +analog entangling layer. Defaults to global ZZ Hamiltonian. +Time parameter is considered variational.

+
+

+ + TYPE: + AbstractBlock + + + DEFAULT: + None + +

+
+ +
+ Source code in qadence/constructors/ansatze.py +
def hea_sDAQC(
+    n_qubits: int,
+    depth: int = 1,
+    param_prefix: str = "theta",
+    operations: list[type[AbstractBlock]] = [RX, RY, RX],
+    support: tuple[int, ...] = None,
+    entangler: AbstractBlock | None = None,
+) -> AbstractBlock:
+    """
+    Construct the Hardware Efficient Ansatz (HEA) with analog entangling layers.
+
+    It uses step-wise digital-analog computation.
+
+    Args:
+        n_qubits (int): number of qubits in the block.
+        depth (int): number of layers of the HEA.
+        param_prefix (str): the base name of the variational parameters
+        operations (list): list of operations to cycle through in the
+            digital single-qubit rotations of each layer.
+        support (tuple): qubit indexes where the HEA is applied.
+        entangler (AbstractBlock): Hamiltonian generator for the
+            analog entangling layer. Defaults to global ZZ Hamiltonian.
+            Time parameter is considered variational.
+    """
+
+    # TODO: Add qubit support
+    if entangler is None:
+        entangler = hamiltonian_factory(n_qubits, interaction=Interaction.NN)
+    try:
+        if not block_is_qubit_hamiltonian(entangler):
+            raise ValueError(
+                "Please provide a valid Pauli Hamiltonian generator for digital-analog HEA."
+            )
+    except NotImplementedError:
+        raise ValueError(
+            "Please provide a valid Pauli Hamiltonian generator for digital-analog HEA."
+        )
+
+    rot_list = _rotations_digital(
+        n_qubits=n_qubits,
+        depth=depth,
+        param_prefix=param_prefix,
+        support=support,
+        operations=operations,
+    )
+
+    ent_list = _entanglers_analog(
+        depth=depth,
+        param_prefix=param_prefix,
+        entangler=entangler,
+    )
+
+    layers = []
+    for d in range(depth):
+        layers.append(rot_list[d])
+        layers.append(ent_list[d])
+    return tag(chain(*layers), "HEA-sDA")
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ ObservableConfig(detuning, scale=1.0, shift=0.0, transformation_type=ObservableTransform.NONE, trainable_transform=None) + + + dataclass + + +

+ + +
+ + + + + +
+ + + + + + + +
+ + + +

+ detuning: TDetuning + + + instance-attribute + + +

+ + +
+ +

Single qubit detuning of the observable Hamiltonian.

+

Accepts single-qubit operator N, X, Y, or Z.

+
+ +
+ +
+ + + +

+ scale: TParameter = 1.0 + + + class-attribute + instance-attribute + + +

+ + +
+ +

The scale by which to multiply the output of the observable.

+
+ +
+ +
+ + + +

+ shift: TParameter = 0.0 + + + class-attribute + instance-attribute + + +

+ + +
+ +

The shift to add to the output of the observable.

+
+ +
+ +
+ + + +

+ trainable_transform: bool | None = None + + + class-attribute + instance-attribute + + +

+ + +
+ +

Whether to have a trainable transformation on the output of the observable.

+

If None, the scale and shift are numbers. +If True, the scale and shift are VariationalParameter. +If False, the scale and shift are FeatureParameter.

+
+ +
+ +
+ + + +

+ transformation_type: ObservableTransform = ObservableTransform.NONE + + + class-attribute + instance-attribute + + +

+ + +
+ +

The type of transformation.

+
+ +
+ + + + + +
+ +
+ +
+ + +
+ + +

+ hamiltonian_factory(register, interaction=None, detuning=None, interaction_strength=None, detuning_strength=None, random_strength=False, use_all_node_pairs=False) + +

+ + +
+ +

General Hamiltonian creation function.

+

Can be used to create Hamiltonians with 2-qubit +interactions and single-qubit detunings, both with arbitrary strength or parameterized.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
register +
+

register of qubits with a specific graph topology, or number of qubits. +When passing a number of qubits a register with all-to-all connectivity +is created.

+
+

+ + TYPE: + Register | int + +

+
interaction +
+

Interaction.ZZ, Interaction.NN, Interaction.XY, or Interacton.XYZ.

+
+

+ + TYPE: + Interaction | Callable | None + + + DEFAULT: + None + +

+
detuning +
+

single-qubit operator N, X, Y, or Z.

+
+

+ + TYPE: + TDetuning | None + + + DEFAULT: + None + +

+
interaction_strength +
+

list of values to be used as the interaction strength for each +pair of qubits. Should be ordered following the order of Register(n_qubits).edges. +Alternatively, some string "x" can be passed, which will create a parameterized +interactions for each pair of qubits, each labelled as "x_ij".

+
+

+ + TYPE: + TArray | str | None + + + DEFAULT: + None + +

+
detuning_strength +
+

list of values to be used as the detuning strength for each qubit. +Alternatively, some string "x" can be passed, which will create a parameterized +detuning for each qubit, each labelled as "x_i".

+
+

+ + TYPE: + TArray | str | None + + + DEFAULT: + None + +

+
random_strength +
+

set random interaction and detuning strengths between -1 and 1.

+
+

+ + TYPE: + bool + + + DEFAULT: + False + +

+
use_all_node_pairs +
+

computes an interaction term for every pair of nodes in the graph, +independent of the edge topology in the register. Useful for defining Hamiltonians +where the interaction strength decays with the distance.

+
+

+ + TYPE: + bool + + + DEFAULT: + False + +

+
+ + +

Examples:

+
from qadence import hamiltonian_factory, Interaction, Register, Z
+
+n_qubits = 3
+
+# Constant total magnetization observable:
+observable = hamiltonian_factory(n_qubits, detuning = Z)
+
+# Parameterized total magnetization observable:
+observable = hamiltonian_factory(n_qubits, detuning = Z, detuning_strength = "z")
+
+# Random all-to-all XY Hamiltonian generator:
+generator = hamiltonian_factory(
+    n_qubits,
+    interaction = Interaction.XY,
+    random_strength = True,
+    )
+
+# Parameterized NN Hamiltonian generator with a square grid interaction topology:
+register = Register.square(qubits_side = n_qubits)
+generator = hamiltonian_factory(
+    register,
+    interaction = Interaction.NN,
+    interaction_strength = "theta"
+    )
+
+
+ +

+
+ +
+ +
+ Source code in qadence/constructors/hamiltonians.py +
def hamiltonian_factory(
+    register: Register | int,
+    interaction: Interaction | Callable | None = None,
+    detuning: TDetuning | None = None,
+    interaction_strength: TArray | str | None = None,
+    detuning_strength: TArray | str | None = None,
+    random_strength: bool = False,
+    use_all_node_pairs: bool = False,
+) -> AbstractBlock:
+    """
+    General Hamiltonian creation function.
+
+    Can be used to create Hamiltonians with 2-qubit
+    interactions and single-qubit detunings, both with arbitrary strength or parameterized.
+
+    Arguments:
+        register: register of qubits with a specific graph topology, or number of qubits.
+            When passing a number of qubits a register with all-to-all connectivity
+            is created.
+        interaction: Interaction.ZZ, Interaction.NN, Interaction.XY, or Interacton.XYZ.
+        detuning: single-qubit operator N, X, Y, or Z.
+        interaction_strength: list of values to be used as the interaction strength for each
+            pair of qubits. Should be ordered following the order of `Register(n_qubits).edges`.
+            Alternatively, some string "x" can be passed, which will create a parameterized
+            interactions for each pair of qubits, each labelled as `"x_ij"`.
+        detuning_strength: list of values to be used as the detuning strength for each qubit.
+            Alternatively, some string "x" can be passed, which will create a parameterized
+            detuning for each qubit, each labelled as `"x_i"`.
+        random_strength: set random interaction and detuning strengths between -1 and 1.
+        use_all_node_pairs: computes an interaction term for every pair of nodes in the graph,
+            independent of the edge topology in the register. Useful for defining Hamiltonians
+            where the interaction strength decays with the distance.
+
+    Examples:
+        ```python exec="on" source="material-block" result="json"
+        from qadence import hamiltonian_factory, Interaction, Register, Z
+
+        n_qubits = 3
+
+        # Constant total magnetization observable:
+        observable = hamiltonian_factory(n_qubits, detuning = Z)
+
+        # Parameterized total magnetization observable:
+        observable = hamiltonian_factory(n_qubits, detuning = Z, detuning_strength = "z")
+
+        # Random all-to-all XY Hamiltonian generator:
+        generator = hamiltonian_factory(
+            n_qubits,
+            interaction = Interaction.XY,
+            random_strength = True,
+            )
+
+        # Parameterized NN Hamiltonian generator with a square grid interaction topology:
+        register = Register.square(qubits_side = n_qubits)
+        generator = hamiltonian_factory(
+            register,
+            interaction = Interaction.NN,
+            interaction_strength = "theta"
+            )
+        ```
+    """
+
+    if interaction is None and detuning is None:
+        raise ValueError("Please provide an interaction and/or detuning for the Hamiltonian.")
+
+    # If number of qubits is given, creates all-to-all register
+    register = Register(register) if isinstance(register, int) else register
+
+    # Get interaction function
+    if interaction is not None:
+        if callable(interaction):
+            int_fn = interaction
+            try:
+                if not block_is_qubit_hamiltonian(interaction(0, 1)):
+                    raise ValueError("Custom interactions must be composed of Pauli operators.")
+            except TypeError:
+                raise TypeError(
+                    "Please use a custom interaction function signed with two integer parameters."
+                )
+        else:
+            int_fn = INTERACTION_DICT.get(interaction, None)  # type: ignore [arg-type]
+            if int_fn is None:
+                raise KeyError(f"Interaction {interaction} not supported.")
+
+    # Check single-qubit detuning
+    if (detuning is not None) and (detuning not in DETUNINGS):
+        raise TypeError(f"Detuning of type {type(detuning)} not supported.")
+
+    # Pre-process detuning and interaction strengths and update register
+    detuning_strength_array = _preprocess_strengths(
+        register, detuning_strength, "nodes", random_strength
+    )
+
+    edge_str = "all_node_pairs" if use_all_node_pairs else "edges"
+    interaction_strength_array = _preprocess_strengths(
+        register, interaction_strength, edge_str, random_strength
+    )
+
+    # Create single-qubit detunings:
+    single_qubit_terms: List[AbstractBlock] = []
+    if detuning is not None:
+        for strength, node in zip(detuning_strength_array, register.nodes):
+            single_qubit_terms.append(strength * detuning(node))
+
+    # Create two-qubit interactions:
+    two_qubit_terms: List[AbstractBlock] = []
+    edge_data = register.all_node_pairs if use_all_node_pairs else register.edges
+    if interaction is not None and int_fn is not None:
+        for strength, edge in zip(interaction_strength_array, edge_data):
+            two_qubit_terms.append(strength * int_fn(*edge))
+
+    return add(*single_qubit_terms, *two_qubit_terms)
+
+
+
+ +
+ +
+ + +

+ interaction_nn(i, j) + +

+ + +
+ +

Ising NN interaction.

+ +
+ Source code in qadence/constructors/hamiltonians.py +
24
+25
+26
def interaction_nn(i: int, j: int) -> AbstractBlock:
+    """Ising NN interaction."""
+    return N(i) @ N(j)
+
+
+
+ +
+ +
+ + +

+ interaction_xy(i, j) + +

+ + +
+ +

XY interaction.

+ +
+ Source code in qadence/constructors/hamiltonians.py +
29
+30
+31
def interaction_xy(i: int, j: int) -> AbstractBlock:
+    """XY interaction."""
+    return X(i) @ X(j) + Y(i) @ Y(j)
+
+
+
+ +
+ +
+ + +

+ interaction_xyz(i, j) + +

+ + +
+ +

Heisenberg XYZ interaction.

+ +
+ Source code in qadence/constructors/hamiltonians.py +
34
+35
+36
def interaction_xyz(i: int, j: int) -> AbstractBlock:
+    """Heisenberg XYZ interaction."""
+    return X(i) @ X(j) + Y(i) @ Y(j) + Z(i) @ Z(j)
+
+
+
+ +
+ +
+ + +

+ interaction_zz(i, j) + +

+ + +
+ +

Ising ZZ interaction.

+ +
+ Source code in qadence/constructors/hamiltonians.py +
19
+20
+21
def interaction_zz(i: int, j: int) -> AbstractBlock:
+    """Ising ZZ interaction."""
+    return Z(i) @ Z(j)
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ qft(n_qubits, support=None, inverse=False, reverse_in=False, swaps_out=False, strategy=Strategy.DIGITAL, gen_build=None) + +

+ + +
+ +

The Quantum Fourier Transform.

+

Depending on the application, user should be careful with qubit ordering +in the input and output. This can be controlled with reverse_in and swaps_out +arguments.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

number of qubits in the QFT

+
+

+ + TYPE: + int + +

+
support +
+

qubit support to use

+
+

+ + TYPE: + tuple[int, ...] + + + DEFAULT: + None + +

+
inverse +
+

True performs the inverse QFT

+
+

+ + TYPE: + bool + + + DEFAULT: + False + +

+
reverse_in +
+

Reverses the input qubits to account for endianness

+
+

+ + TYPE: + bool + + + DEFAULT: + False + +

+
swaps_out +
+

Performs swaps on the output qubits to match the "textbook" QFT.

+
+

+ + TYPE: + bool + + + DEFAULT: + False + +

+
strategy +
+

Strategy.Digital or Strategy.sDAQC

+
+

+ + TYPE: + Strategy + + + DEFAULT: + DIGITAL + +

+
gen_build +
+

building block Ising Hamiltonian for the DAQC transform. +Defaults to constant all-to-all Ising.

+
+

+ + TYPE: + AbstractBlock | None + + + DEFAULT: + None + +

+
+ + +

Examples:

+
from qadence import qft
+
+n_qubits = 3
+
+qft_circuit = qft(n_qubits, strategy = "sDAQC")
+
+
+ +

+
+ +
+ +
+ Source code in qadence/constructors/qft.py +
15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
def qft(
+    n_qubits: int,
+    support: tuple[int, ...] = None,
+    inverse: bool = False,
+    reverse_in: bool = False,
+    swaps_out: bool = False,
+    strategy: Strategy = Strategy.DIGITAL,
+    gen_build: AbstractBlock | None = None,
+) -> AbstractBlock:
+    """
+    The Quantum Fourier Transform.
+
+    Depending on the application, user should be careful with qubit ordering
+    in the input and output. This can be controlled with reverse_in and swaps_out
+    arguments.
+
+    Args:
+        n_qubits: number of qubits in the QFT
+        support: qubit support to use
+        inverse: True performs the inverse QFT
+        reverse_in: Reverses the input qubits to account for endianness
+        swaps_out: Performs swaps on the output qubits to match the "textbook" QFT.
+        strategy: Strategy.Digital or Strategy.sDAQC
+        gen_build: building block Ising Hamiltonian for the DAQC transform.
+            Defaults to constant all-to-all Ising.
+
+    Examples:
+        ```python exec="on" source="material-block" result="json"
+        from qadence import qft
+
+        n_qubits = 3
+
+        qft_circuit = qft(n_qubits, strategy = "sDAQC")
+        ```
+    """
+
+    if support is None:
+        support = tuple(range(n_qubits))
+
+    assert len(support) <= n_qubits, "Wrong qubit support supplied"
+
+    if reverse_in:
+        support = support[::-1]
+
+    qft_layer_dict = {
+        Strategy.DIGITAL: _qft_layer_digital,
+        Strategy.SDAQC: _qft_layer_sDAQC,
+        Strategy.BDAQC: _qft_layer_bDAQC,
+        Strategy.ANALOG: _qft_layer_analog,
+    }
+
+    try:
+        layer_func = qft_layer_dict[strategy]
+    except KeyError:
+        raise KeyError(f"Strategy {strategy} not recognized.")
+
+    qft_layers = reversed(range(n_qubits)) if inverse else range(n_qubits)
+
+    qft_circ = chain(
+        layer_func(
+            n_qubits=n_qubits, support=support, layer=layer, inverse=inverse, gen_build=gen_build
+        )  # type: ignore
+        for layer in qft_layers
+    )
+
+    if swaps_out:
+        swap_ops = [SWAP(support[i], support[n_qubits - i - 1]) for i in range(n_qubits // 2)]
+        qft_circ = chain(*swap_ops, qft_circ) if inverse else chain(qft_circ, *swap_ops)
+
+    return tag(qft_circ, tag="iQFT") if inverse else tag(qft_circ, tag="QFT")
+
+
+
+ +
+ + + +
+ +
+ +

Hardware efficient ansatz for Rydberg atom arrays

+ + +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ rydberg_hea(register, n_layers=1, addressable_detuning=True, addressable_drive=False, tunable_phase=False, additional_prefix=None) + +

+ + +
+ +

Hardware efficient ansatz for neutral atom (Rydberg) platforms.

+

This constructor implements a variational ansatz which is very close to +what is implementable on 2nd generation PASQAL quantum devices. In particular, +it implements evolution over a specific Hamiltonian which can be realized on +the device. This Hamiltonian contains:

+
    +
  • +

    an interaction term given by the standard NN interaction and determined starting + from the positions in the input register: Hᵢₙₜ = ∑ᵢⱼ C₆/rᵢⱼ⁶ nᵢnⱼ

    +
  • +
  • +

    a detuning term which corresponding to a n_i = (1+sigma_i^z)/2 applied to + all the qubits. If the addressable_detuning flag is set to True, the routine + effectively a local n_i = (1+sigma_i^z)/2 term in the + evolved Hamiltonian with a different coefficient for each atom. These + coefficients determine a local addressing pattern for the detuning on a subset + of the qubits. In this routine, the coefficients are variational parameters + and they will therefore be optimized at each optimizer step

    +
  • +
  • +

    a drive term which corresponding to a sigma^x evolution operation applied to + all the qubits. If the addressable_drive flag is set to True, the routine + effectively a local sigma_i^x term in the evolved Hamiltonian with a different + coefficient for each atom. These coefficients determine a local addressing pattern + for the drive on a subset of the qubits. In this routine, the coefficients are + variational parameters and they will therefore be optimized at each optimizer step

    +
  • +
  • +

    if the tunable_phase flag is set to True, the drive term is modified in the following + way: drive = cos(phi) * sigma^x - sin(phi) * sigma^y + The addressable pattern above is maintained and the phase is considered just as an + additional variational parameter which is optimized with the rest

    +
  • +
+

Notice that, on real devices, the coefficients assigned to each qubit in both the detuning +and drive patterns should be non-negative and they should always sum to 1. This is not the +case for the implementation in this routine since the coefficients (weights) do not have any +constraint. Therefore, this HEA is not completely realizable on neutral atom devices.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
register +
+

the input atomic register with Cartesian coordinates.

+
+

+ + TYPE: + Register + +

+
n_layers +
+

number layers in the HEA, each layer includes a drive, detuning and +pure interaction pulses whose is a variational parameter

+
+

+ + TYPE: + int + + + DEFAULT: + 1 + +

+
addressable_detuning +
+

whether to turn on the trainable semi-local addressing pattern +on the detuning (n_i terms in the Hamiltonian)

+
+

+ + TYPE: + bool + + + DEFAULT: + True + +

+
addressable_drive +
+

whether to turn on the trainable semi-local addressing pattern +on the drive (sigma_i^x terms in the Hamiltonian)

+
+

+ + TYPE: + bool + + + DEFAULT: + False + +

+
tunable_phase +
+

whether to have a tunable phase to get both sigma^x and sigma^y rotations +in the drive term. If False, only a sigma^x term will be included in the drive part +of the Hamiltonian generator

+
+

+ + TYPE: + bool + + + DEFAULT: + False + +

+
additional_prefix +
+

an additional prefix to attach to the parameter names

+
+

+ + TYPE: + str + + + DEFAULT: + None + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + ChainBlock + + +
+

The Rydberg HEA block

+
+
+ +
+ Source code in qadence/constructors/rydberg_hea.py +
def rydberg_hea(
+    register: qd.Register,
+    n_layers: int = 1,
+    addressable_detuning: bool = True,
+    addressable_drive: bool = False,
+    tunable_phase: bool = False,
+    additional_prefix: str = None,
+) -> qd.blocks.ChainBlock:
+    """Hardware efficient ansatz for neutral atom (Rydberg) platforms.
+
+    This constructor implements a variational ansatz which is very close to
+    what is implementable on 2nd generation PASQAL quantum devices. In particular,
+    it implements evolution over a specific Hamiltonian which can be realized on
+    the device. This Hamiltonian contains:
+
+    * an interaction term given by the standard NN interaction and determined starting
+        from the positions in the input register: Hᵢₙₜ = ∑ᵢⱼ C₆/rᵢⱼ⁶ nᵢnⱼ
+
+    * a detuning term which corresponding to a n_i = (1+sigma_i^z)/2 applied to
+        all the qubits. If the `addressable_detuning` flag is set to True, the routine
+        effectively a local n_i = (1+sigma_i^z)/2 term in the
+        evolved Hamiltonian with a different coefficient for each atom. These
+        coefficients determine a local addressing pattern for the detuning on a subset
+        of the qubits. In this routine, the coefficients are variational parameters
+        and they will therefore be optimized at each optimizer step
+
+    * a drive term which corresponding to a sigma^x evolution operation applied to
+        all the qubits. If the `addressable_drive` flag is set to True, the routine
+        effectively a local sigma_i^x term in the evolved Hamiltonian with a different
+        coefficient for each atom. These coefficients determine a local addressing pattern
+        for the drive on a subset of the qubits. In this routine, the coefficients are
+        variational parameters and they will therefore be optimized at each optimizer step
+
+    * if the `tunable_phase` flag is set to True, the drive term is modified in the following
+        way: drive = cos(phi) * sigma^x - sin(phi) * sigma^y
+        The addressable pattern above is maintained and the phase is considered just as an
+        additional variational parameter which is optimized with the rest
+
+    Notice that, on real devices, the coefficients assigned to each qubit in both the detuning
+    and drive patterns should be non-negative and they should always sum to 1. This is not the
+    case for the implementation in this routine since the coefficients (weights) do not have any
+    constraint. Therefore, this HEA is not completely realizable on neutral atom devices.
+
+    Args:
+        register: the input atomic register with Cartesian coordinates.
+        n_layers: number layers in the HEA, each layer includes a drive, detuning and
+            pure interaction pulses whose is a variational parameter
+        addressable_detuning: whether to turn on the trainable semi-local addressing pattern
+            on the detuning (n_i terms in the Hamiltonian)
+        addressable_drive: whether to turn on the trainable semi-local addressing pattern
+            on the drive (sigma_i^x terms in the Hamiltonian)
+        tunable_phase: whether to have a tunable phase to get both sigma^x and sigma^y rotations
+            in the drive term. If False, only a sigma^x term will be included in the drive part
+            of the Hamiltonian generator
+        additional_prefix: an additional prefix to attach to the parameter names
+
+    Returns:
+        The Rydberg HEA block
+    """
+    n_qubits = register.n_qubits
+    prefix = "" if additional_prefix is None else "_" + additional_prefix
+
+    detunings = None
+    # add a detuning pattern locally addressing the atoms
+    if addressable_detuning:
+        detunings = [qd.VariationalParameter(f"detmap_{j}") for j in range(n_qubits)]
+
+    drives = None
+    # add a drive pattern locally addressing the atoms
+    if addressable_drive:
+        drives = [qd.VariationalParameter(f"drivemap_{j}") for j in range(n_qubits)]
+
+    phase = None
+    if tunable_phase:
+        phase = qd.VariationalParameter("phase")
+
+    return chain(
+        rydberg_hea_layer(
+            register,
+            VariationalParameter(f"At{prefix}_{layer}"),
+            VariationalParameter(f"Omega{prefix}_{layer}"),
+            VariationalParameter(f"wait{prefix}_{layer}"),
+            detunings=detunings,
+            drives=drives,
+            phase=phase,
+        )
+        for layer in range(n_layers)
+    )
+
+
+
+ +
+ +
+ + +

+ rydberg_hea_layer(register, tevo_drive, tevo_det, tevo_wait, phase=None, detunings=None, drives=None, drive_scaling=1.0) + +

+ + +
+ +

A single layer of the Rydberg hardware efficient ansatz.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
register +
+

the input register with atomic coordinates needed to build the interaction.

+
+

+ + TYPE: + Register + +

+
tevo_drive +
+

a variational parameter for the duration of the drive term of +the Hamiltonian generator, including optional semi-local addressing

+
+

+ + TYPE: + Parameter | float + +

+
tevo_det +
+

a variational parameter for the duration of the detuning term of the +Hamiltonian generator, including optional semi-local addressing

+
+

+ + TYPE: + Parameter | float + +

+
tevo_wait +
+

a variational parameter for the duration of the waiting +time with interaction only

+
+

+ + TYPE: + Parameter | float + +

+
phase +
+

a variational parameter representing the global phase. If None, the +global phase is set to 0 which results in a drive term in sigma^x only. Otherwise +both sigma^x and sigma^y terms will be present

+
+

+ + TYPE: + Parameter | float | None + + + DEFAULT: + None + +

+
detunings +
+

a list of parameters with the weights of the locally addressed +detuning terms. These are variational parameters which are tuned by the optimizer

+
+

+ + TYPE: + list[Parameter] | list[float] | None + + + DEFAULT: + None + +

+
drives +
+

a list of parameters with the weights of the locally addressed +drive terms. These are variational parameters which are tuned by the optimizer

+
+

+ + TYPE: + list[Parameter] | list[float] | None + + + DEFAULT: + None + +

+
drive_scaling +
+

a scaling term to be added to the drive Hamiltonian generator

+
+

+ + TYPE: + float + + + DEFAULT: + 1.0 + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + ChainBlock + + +
+

A block with a single layer of Rydberg HEA

+
+
+ +
+ Source code in qadence/constructors/rydberg_hea.py +
def rydberg_hea_layer(
+    register: qd.Register,
+    tevo_drive: Parameter | float,
+    tevo_det: Parameter | float,
+    tevo_wait: Parameter | float,
+    phase: Parameter | float | None = None,
+    detunings: list[Parameter] | list[float] | None = None,
+    drives: list[Parameter] | list[float] | None = None,
+    drive_scaling: float = 1.0,
+) -> ChainBlock:
+    """A single layer of the Rydberg hardware efficient ansatz.
+
+    Args:
+        register: the input register with atomic coordinates needed to build the interaction.
+        tevo_drive: a variational parameter for the duration of the drive term of
+            the Hamiltonian generator, including optional semi-local addressing
+        tevo_det: a variational parameter for the duration of the detuning term of the
+            Hamiltonian generator, including optional semi-local addressing
+        tevo_wait: a variational parameter for the duration of the waiting
+            time with interaction only
+        phase: a variational parameter representing the global phase. If None, the
+            global phase is set to 0 which results in a drive term in sigma^x only. Otherwise
+            both sigma^x and sigma^y terms will be present
+        detunings: a list of parameters with the weights of the locally addressed
+            detuning terms. These are variational parameters which are tuned by the optimizer
+        drives: a list of parameters with the weights of the locally addressed
+            drive terms. These are variational parameters which are tuned by the optimizer
+        drive_scaling: a scaling term to be added to the drive Hamiltonian generator
+
+    Returns:
+        A block with a single layer of Rydberg HEA
+    """
+    n_qubits = register.n_qubits
+
+    drive_x = _amplitude_map(n_qubits, qd.X, weights=drives)
+    drive_y = _amplitude_map(n_qubits, qd.Y, weights=drives)
+    detuning = _amplitude_map(n_qubits, qd.N, weights=detunings)
+    interaction = hamiltonian_factory(register, qd.Interaction.NN)
+
+    # drive and interaction are not commuting thus they need to be
+    # added directly into the final Hamiltonian generator
+    if phase is not None:
+        generator = (
+            drive_scaling * sympy.cos(phase) * drive_x
+            - drive_scaling * sympy.sin(phase) * drive_y
+            + interaction
+        )
+    else:
+        generator = drive_scaling * drive_x + interaction
+
+    return chain(
+        qd.HamEvo(generator, tevo_drive),
+        # detuning and interaction are commuting, so they
+        # can be ordered arbitrarily and treated separately
+        qd.HamEvo(interaction, tevo_wait),
+        qd.HamEvo(detuning, tevo_det),
+    )
+
+
+
+ +
+ + + +
+ +
+ +

The DAQC Transform

+ + +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ daqc_transform(n_qubits, gen_target, t_f, gen_build=None, zero_tol=1e-08, strategy=Strategy.SDAQC, ignore_global_phases=False) + +

+ + +
+ +

Implements the DAQC transform for representing an arbitrary 2-body Hamiltonian.

+

The result is another fixed 2-body Hamiltonian.

+

Reference for universality of 2-body Hamiltonians:

+

-- https://arxiv.org/abs/quant-ph/0106064

+

Based on the transformation for Ising (ZZ) interactions, as described in the paper

+

-- https://arxiv.org/abs/1812.03637

+

The transform translates a target weighted generator of the type:

+
`gen_target = add(g_jk * kron(op(j), op(k)) for j < k)`
+
+

To a circuit using analog evolutions with a fixed building block generator:

+
`gen_build = add(f_jk * kron(op(j), op(k)) for j < k)`
+
+

where op = Z or op = N.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

total number of qubits to use.

+
+

+ + TYPE: + int + +

+
gen_target +
+

target generator built with the structure above. The type +of the generator will be automatically evaluated when parsing.

+
+

+ + TYPE: + AbstractBlock + +

+
t_f +
+

total time for the gen_target evolution.

+
+

+ + TYPE: + float + +

+
gen_build +
+

fixed generator to act as a building block. Defaults to +constant NN: add(1.0 * kron(N(j), N(k)) for j < k). The type +of the generator will be automatically evaluated when parsing.

+
+

+ + TYPE: + AbstractBlock | None + + + DEFAULT: + None + +

+
zero_tol +
+

default "zero" for a missing interaction. Included for +numerical reasons, see notes below.

+
+

+ + TYPE: + float + + + DEFAULT: + 1e-08 + +

+
strategy +
+

sDAQC or bDAQC, following definitions in the reference paper.

+
+

+ + TYPE: + Strategy + + + DEFAULT: + SDAQC + +

+
ignore_global_phases +
+

if True the transform does not correct the global +phases coming from the mapping between ZZ and NN interactions.

+
+

+ + TYPE: + bool + + + DEFAULT: + False + +

+
+

Notes:

+
The paper follows an index convention of running from 1 to N. A few functions
+here also use that convention to be consistent with the paper. However, for qadence
+related things the indices are converted to [0, N-1].
+
+The case for `n_qubits = 4` is an edge case where the sign matrix is not invertible.
+There is a workaround for this described in the paper, but it is currently not implemented.
+
+The current implementation may result in evolution times that are both positive or
+negative. In practice, both can be represented by simply changing the signs of the
+interactions. However, for a real implementation where the interactions should remain
+fixed, the paper discusses a workaround that is not currently implemented.
+
+The transformation works by representing each interaction in the target hamiltonian by
+a set of evolutions using the build hamiltonian. As a consequence, some care must be
+taken when choosing the build hamiltonian. Some cases:
+
+- The target hamiltonian can have any interaction, as long as it is sufficiently
+represented in the build hamiltonian. E.g., if the interaction `g_01 * kron(Z(0), Z(1))`
+is in the target hamiltonian, the corresponding interaction `f_01 * kron(Z(0), Z(1))`
+needs to be in the build hamiltonian. This is checked when the generators are parsed.
+
+- The build hamiltonian can have any interaction, irrespectively of it being needed
+for the target hamiltonian. This is especially useful for designing local operations
+through the repeated evolution of a "global" hamiltonian.
+
+- The parameter `zero_tol` controls what it means for an interaction to be "missing".
+Any interaction strength smaller than `zero_tol` in the build hamiltonian will not be
+considered, and thus that interaction is missing.
+
+- The various ratios `g_jk / f_jk` will influence the time parameter for the various
+evolution slices, meaning that if there is a big discrepancy in the interaction strength
+for a given qubit pair (j, k), the output circuit may require the usage of hamiltonian
+evolutions with very large times.
+
+- A warning will be issued for evolution times larger than `1/sqrt(zero_tol)`. Evolution
+times smaller than `zero_tol` will not be represented.
+
+ + +

Examples:

+
from qadence import Z, N, daqc_transform
+
+n_qubits = 3
+
+gen_build = 0.5 * (N(0)@N(1)) + 0.7 * (N(1)@N(2)) + 0.2 * (N(0)@N(2))
+
+gen_target = 0.1 * (Z(1)@Z(2))
+
+t_f = 2.0
+
+transformed_circuit = daqc_transform(
+    n_qubits = n_qubits,
+    gen_target = gen_target,
+    t_f = t_f,
+    gen_build = gen_build,
+)
+
+
+ +

+
+ +
+ +
+ Source code in qadence/constructors/daqc/daqc.py +
def daqc_transform(
+    n_qubits: int,
+    gen_target: AbstractBlock,
+    t_f: float,
+    gen_build: AbstractBlock | None = None,
+    zero_tol: float = 1e-08,
+    strategy: Strategy = Strategy.SDAQC,
+    ignore_global_phases: bool = False,
+) -> AbstractBlock:
+    """
+    Implements the DAQC transform for representing an arbitrary 2-body Hamiltonian.
+
+    The result is another fixed 2-body Hamiltonian.
+
+    Reference for universality of 2-body Hamiltonians:
+
+    -- https://arxiv.org/abs/quant-ph/0106064
+
+    Based on the transformation for Ising (ZZ) interactions, as described in the paper
+
+    -- https://arxiv.org/abs/1812.03637
+
+    The transform translates a target weighted generator of the type:
+
+        `gen_target = add(g_jk * kron(op(j), op(k)) for j < k)`
+
+    To a circuit using analog evolutions with a fixed building block generator:
+
+        `gen_build = add(f_jk * kron(op(j), op(k)) for j < k)`
+
+    where `op = Z` or `op = N`.
+
+    Args:
+        n_qubits: total number of qubits to use.
+        gen_target: target generator built with the structure above. The type
+            of the generator will be automatically evaluated when parsing.
+        t_f (float): total time for the gen_target evolution.
+        gen_build: fixed generator to act as a building block. Defaults to
+            constant NN: add(1.0 * kron(N(j), N(k)) for j < k). The type
+            of the generator will be automatically evaluated when parsing.
+        zero_tol: default "zero" for a missing interaction. Included for
+            numerical reasons, see notes below.
+        strategy: sDAQC or bDAQC, following definitions in the reference paper.
+        ignore_global_phases: if `True` the transform does not correct the global
+            phases coming from the mapping between ZZ and NN interactions.
+
+    Notes:
+
+        The paper follows an index convention of running from 1 to N. A few functions
+        here also use that convention to be consistent with the paper. However, for qadence
+        related things the indices are converted to [0, N-1].
+
+        The case for `n_qubits = 4` is an edge case where the sign matrix is not invertible.
+        There is a workaround for this described in the paper, but it is currently not implemented.
+
+        The current implementation may result in evolution times that are both positive or
+        negative. In practice, both can be represented by simply changing the signs of the
+        interactions. However, for a real implementation where the interactions should remain
+        fixed, the paper discusses a workaround that is not currently implemented.
+
+        The transformation works by representing each interaction in the target hamiltonian by
+        a set of evolutions using the build hamiltonian. As a consequence, some care must be
+        taken when choosing the build hamiltonian. Some cases:
+
+        - The target hamiltonian can have any interaction, as long as it is sufficiently
+        represented in the build hamiltonian. E.g., if the interaction `g_01 * kron(Z(0), Z(1))`
+        is in the target hamiltonian, the corresponding interaction `f_01 * kron(Z(0), Z(1))`
+        needs to be in the build hamiltonian. This is checked when the generators are parsed.
+
+        - The build hamiltonian can have any interaction, irrespectively of it being needed
+        for the target hamiltonian. This is especially useful for designing local operations
+        through the repeated evolution of a "global" hamiltonian.
+
+        - The parameter `zero_tol` controls what it means for an interaction to be "missing".
+        Any interaction strength smaller than `zero_tol` in the build hamiltonian will not be
+        considered, and thus that interaction is missing.
+
+        - The various ratios `g_jk / f_jk` will influence the time parameter for the various
+        evolution slices, meaning that if there is a big discrepancy in the interaction strength
+        for a given qubit pair (j, k), the output circuit may require the usage of hamiltonian
+        evolutions with very large times.
+
+        - A warning will be issued for evolution times larger than `1/sqrt(zero_tol)`. Evolution
+        times smaller than `zero_tol` will not be represented.
+
+    Examples:
+        ```python exec="on" source="material-block" result="json"
+        from qadence import Z, N, daqc_transform
+
+        n_qubits = 3
+
+        gen_build = 0.5 * (N(0)@N(1)) + 0.7 * (N(1)@N(2)) + 0.2 * (N(0)@N(2))
+
+        gen_target = 0.1 * (Z(1)@Z(2))
+
+        t_f = 2.0
+
+        transformed_circuit = daqc_transform(
+            n_qubits = n_qubits,
+            gen_target = gen_target,
+            t_f = t_f,
+            gen_build = gen_build,
+        )
+        ```
+    """
+
+    ##################
+    # Input controls #
+    ##################
+
+    if strategy != Strategy.SDAQC:
+        raise NotImplementedError("Currently only the sDAQC transform is implemented.")
+
+    if n_qubits == 4:
+        raise NotImplementedError("DAQC transform 4-qubit edge case not implemented.")
+
+    if gen_build is None:
+        gen_build = hamiltonian_factory(n_qubits, interaction=Interaction.NN)
+
+    try:
+        if (not block_is_qubit_hamiltonian(gen_target)) or (
+            not block_is_qubit_hamiltonian(gen_build)
+        ):
+            raise ValueError(
+                "Generator block is not a qubit Hamiltonian. Only ZZ or NN interactions allowed."
+            )
+    except NotImplementedError:
+        # Happens when block_is_qubit_hamiltonian is called on something that is not a block.
+        raise TypeError(
+            "Generator block is not a qubit Hamiltonian. Only ZZ or NN interactions allowed."
+        )
+
+    #####################
+    # Generator parsing #
+    #####################
+
+    g_jk_target, mat_jk_target, target_type = _parse_generator(n_qubits, gen_target, 0.0)
+    g_jk_build, mat_jk_build, build_type = _parse_generator(n_qubits, gen_build, zero_tol)
+
+    # Get the global phase hamiltonian and single-qubit detuning hamiltonian
+    if build_type == GenDAQC.NN:
+        h_phase_build, h_sq_build = _nn_phase_and_detunings(n_qubits, mat_jk_build)
+
+    if target_type == GenDAQC.NN:
+        h_phase_target, h_sq_target = _nn_phase_and_detunings(n_qubits, mat_jk_target)
+
+    # Time re-scalings
+    if build_type == GenDAQC.ZZ and target_type == GenDAQC.NN:
+        t_star = t_f / 4.0
+    elif build_type == GenDAQC.NN and target_type == GenDAQC.ZZ:
+        t_star = 4.0 * t_f
+    else:
+        t_star = t_f
+
+    # Check if target Hamiltonian can be mapped with the build Hamiltonian
+    assert _check_compatibility(g_jk_target, g_jk_build, zero_tol)
+
+    ##################
+    # DAQC Transform #
+    ##################
+
+    # Section III A of https://arxiv.org/abs/1812.03637:
+
+    # Matrix M for the linear system, exemplified in Table I:
+    matrix_M = _build_matrix_M(n_qubits)
+
+    # Linear system mapping interaction ratios -> evolution times.
+    t_slices = torch.linalg.solve(matrix_M, g_jk_target / g_jk_build) * t_star
+
+    # ZZ-DAQC with ZZ or NN build Hamiltonian
+    daqc_slices = []
+    for m in range(2, n_qubits + 1):
+        for n in range(1, m):
+            alpha = _ix_map(n_qubits, n, m)
+            t = t_slices[alpha - 1]
+            if abs(t) > zero_tol:
+                if abs(t) > (1 / (zero_tol**0.5)):
+                    logger.warning(
+                        """
+Transformed circuit with very long evolution time.
+Make sure your target interactions are sufficiently
+represented in the build Hamiltonian."""
+                    )
+                x_gates = kron(X(n - 1), X(m - 1))
+                analog_evo = HamEvo(gen_build, t)
+                # TODO: Fix repeated X-gates
+                if build_type == GenDAQC.NN:
+                    # Local detuning at each DAQC layer for NN build Hamiltonian
+                    sq_detuning_build = HamEvo(h_sq_build, t)
+                    daqc_slices.append(chain(x_gates, sq_detuning_build, analog_evo, x_gates))
+                elif build_type == GenDAQC.ZZ:
+                    daqc_slices.append(chain(x_gates, analog_evo, x_gates))
+
+    daqc_circuit = chain(*daqc_slices)
+
+    ########################
+    # Phases and Detunings #
+    ########################
+
+    if target_type == GenDAQC.NN:
+        # Local detuning given a NN target Hamiltonian
+        sq_detuning_target = HamEvo(h_sq_target, t_f).dagger()
+        daqc_circuit = chain(sq_detuning_target, daqc_circuit)
+
+    if not ignore_global_phases:
+        if build_type == GenDAQC.NN:
+            # Constant global phase given a NN build Hamiltonian
+            global_phase_build = HamEvo(h_phase_build, t_slices.sum())
+            daqc_circuit = chain(global_phase_build, daqc_circuit)
+
+        if target_type == GenDAQC.NN:
+            # Constant global phase and given a NN target Hamiltonian
+            global_phase_target = HamEvo(h_phase_target, t_f).dagger()
+            daqc_circuit = chain(global_phase_target, daqc_circuit)
+
+    return daqc_circuit
+
+
+
+ +
+ + + +
+ +
+ +

Some utility functions

+ + +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ build_idx_fms(basis, fm_pauli, fm_strategy, n_features, n_qubits, spectrum) + +

+ + +
+ +

Builds the index feature maps based on the given parameters.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
basis +
+

Type of basis chosen for the feature map.

+
+

+ + TYPE: + str + +

+
fm_pauli +
+

The chosen Pauli rotation type.

+
+

+ + TYPE: + PrimitiveBlock type + +

+
fm_strategy +
+

The feature map strategy to be used. Possible values are +'parallel' or 'serial'.

+
+

+ + TYPE: + str + +

+
n_features +
+

The number of features.

+
+

+ + TYPE: + int + +

+
n_qubits +
+

The number of qubits.

+
+

+ + TYPE: + int + +

+
spectrum +
+

The chosen spectrum.

+
+

+ + TYPE: + str + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + list[KronBlock] + + +
+

List[KronBlock]: The list of index feature maps.

+
+
+ +
+ Source code in qadence/constructors/utils.py +
36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
def build_idx_fms(
+    basis: str,
+    fm_pauli: Type[RY],
+    fm_strategy: str,
+    n_features: int,
+    n_qubits: int,
+    spectrum: str,
+) -> list[KronBlock]:
+    """Builds the index feature maps based on the given parameters.
+
+    Args:
+        basis (str): Type of basis chosen for the feature map.
+        fm_pauli (PrimitiveBlock type): The chosen Pauli rotation type.
+        fm_strategy (str): The feature map strategy to be used. Possible values are
+            'parallel' or 'serial'.
+        n_features (int): The number of features.
+        n_qubits (int): The number of qubits.
+        spectrum (str): The chosen spectrum.
+
+    Returns:
+        List[KronBlock]: The list of index feature maps.
+    """
+    idx_fms = []
+    for i in range(n_features):
+        target_qubits = get_fm_qubits(fm_strategy, i, n_qubits, n_features)
+        param = FeatureParameter(f"x{i}")
+        block = kron(
+            *[
+                fm_pauli(qubit, generator_prefactor(spectrum, j) * basis_func(basis, param))
+                for j, qubit in enumerate(target_qubits)
+            ]
+        )
+        idx_fm = block
+        idx_fms.append(idx_fm)
+    return idx_fms
+
+
+
+ +
+ +
+ + +

+ generator_prefactor(spectrum, qubit_index) + +

+ + +
+ +

Converts a spectrum string, e.g. tower or exponential.

+

The result is the correct generator prefactor.

+ +
+ Source code in qadence/constructors/utils.py +
13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
def generator_prefactor(spectrum: str, qubit_index: int) -> float | int:
+    """Converts a spectrum string, e.g. tower or exponential.
+
+    The result is the correct generator prefactor.
+    """
+    spectrum = spectrum.lower()
+    conversion_dict: dict[str, float | int] = {
+        "simple": 1,
+        "tower": qubit_index + 1,
+        "exponential": 2 * PI / (2 ** (qubit_index + 1)),
+    }
+    return conversion_dict[spectrum]
+
+
+
+ +
+ +
+ + +

+ get_fm_qubits(fm_strategy, i, n_qubits, n_features) + +

+ + +
+ +

Returns the list of target qubits for the given feature map strategy and feature index.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
fm_strategy +
+

The feature map strategy to be used. Possible values +are 'parallel' or 'serial'.

+
+

+ + TYPE: + str + +

+
i +
+

The feature index.

+
+

+ + TYPE: + int + +

+
n_qubits +
+

The number of qubits.

+
+

+ + TYPE: + int + +

+
n_features +
+

The number of features.

+
+

+ + TYPE: + int + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + Iterable + + +
+

List[int]: The list of target qubits.

+
+
+ + + + + + + + + + + + + + + +
RAISESDESCRIPTION
+ + ValueError + + +
+

If the feature map strategy is not implemented.

+
+
+ +
+ Source code in qadence/constructors/utils.py +
73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
+85
+86
+87
+88
+89
+90
+91
+92
+93
+94
+95
+96
def get_fm_qubits(fm_strategy: str, i: int, n_qubits: int, n_features: int) -> Iterable:
+    """Returns the list of target qubits for the given feature map strategy and feature index.
+
+    Args:
+        fm_strategy (str): The feature map strategy to be used. Possible values
+            are 'parallel' or 'serial'.
+        i (int): The feature index.
+        n_qubits (int): The number of qubits.
+        n_features (int): The number of features.
+
+    Returns:
+        List[int]: The list of target qubits.
+
+    Raises:
+        ValueError: If the feature map strategy is not implemented.
+    """
+    if fm_strategy == "parallel":
+        n_qubits_per_feature = int(n_qubits / n_features)
+        target_qubits = range(i * n_qubits_per_feature, (i + 1) * n_qubits_per_feature)
+    elif fm_strategy == "serial":
+        target_qubits = range(0, n_qubits)
+    else:
+        raise ValueError(f"Feature map strategy {fm_strategy} not implemented.")
+    return target_qubits
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v1.7.4/api/draw/index.html b/v1.7.4/api/draw/index.html new file mode 100644 index 000000000..5a2c8fc6c --- /dev/null +++ b/v1.7.4/api/draw/index.html @@ -0,0 +1,2920 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Drawing - Qadence + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Drawing

+ +

Drawing

+ + +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ display(x, qcd=None, layout='LR', theme='light', fill=True, **kwargs) + +

+ + +
+ +

Display a block, circuit, or quantum model.

+

The kwargs are forwarded to +the underlying nx.Graph, so you can e.g. specify the size of the resulting plot via +size="2,2" (see examples)

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
x +
+

AbstractBlock, QuantumCircuit, or QuantumModel.

+
+

+ + TYPE: + Any + +

+
qcd +
+

Circuit diagram to plot the block into.

+
+

+ + TYPE: + QuantumCircuitDiagram | Cluster | None + + + DEFAULT: + None + +

+
layout +
+

Can be either "LR" (left-right), or "TB" (top-bottom).

+
+

+ + TYPE: + str + + + DEFAULT: + 'LR' + +

+
theme +
+

Available themes are: ["light", "dark", "black", "white"].

+
+

+ + TYPE: + str + + + DEFAULT: + 'light' + +

+
fill +
+

Whether to fill the passed x with identities.

+
+

+ + TYPE: + bool + + + DEFAULT: + True + +

+
kwargs +
+

Passed on to nx.Graph

+
+

+ + TYPE: + Any + + + DEFAULT: + {} + +

+
+

Examples: +

from qadence import X, Y, kron
+from qadence.draw import display
+
+b = kron(X(0), Y(1))
+display(b, size="1,1", theme="dark")
+
+
+ + + +

+ +
+ Source code in qadence/draw/__init__.py +
13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
def display(
+    x: Any,
+    qcd: QuantumCircuitDiagram | Cluster | None = None,
+    layout: str = "LR",
+    theme: str = "light",
+    fill: bool = True,
+    **kwargs: Any,
+) -> Graph:
+    """Display a block, circuit, or quantum model.
+
+    The `kwargs` are forwarded to
+    the underlying `nx.Graph`, so you can e.g. specify the size of the resulting plot via
+    `size="2,2"` (see examples)
+
+    Arguments:
+        x: `AbstractBlock`, `QuantumCircuit`, or `QuantumModel`.
+        qcd: Circuit diagram to plot the block into.
+        layout: Can be either "LR" (left-right), or "TB" (top-bottom).
+        theme: Available themes are: ["light", "dark", "black", "white"].
+        fill: Whether to fill the passed `x` with identities.
+        kwargs: Passed on to `nx.Graph`
+
+    Examples:
+    ```python exec="on" source="material-block" html="1"
+    from qadence import X, Y, kron
+    from qadence.draw import display
+
+    b = kron(X(0), Y(1))
+    def display(*args, **kwargs): return args # markdown-exec: hide
+    display(b, size="1,1", theme="dark")
+    ```
+    """
+    return make_diagram(x, **kwargs).show()
+
+
+
+ +
+ +
+ + +

+ savefig(x, filename, *args, **kwargs) + +

+ + +
+ +

Save a block, circuit, or quantum model to file. Accepts the same args/kwargs as display.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
x +
+

AbstractBlock, QuantumCircuit, or QuantumModel.

+
+

+ + TYPE: + Any + +

+
filename +
+

Should end in svg/png.

+
+

+ + TYPE: + str + +

+
args +
+

Same as in display.

+
+

+ + TYPE: + Any + + + DEFAULT: + () + +

+
kwargs +
+

Same as in display.

+
+

+ + TYPE: + Any + + + DEFAULT: + {} + +

+
+

Examples: +

from qadence import X, Y, kron
+from qadence.draw import display
+
+b = kron(X(0), Y(1))
+savefig(b, "test.svg", size="1,1", theme="dark")
+
+
+ + + +

+ +
+ Source code in qadence/draw/__init__.py +
48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
def savefig(x: Any, filename: str, *args: Any, **kwargs: Any) -> None:
+    """Save a block, circuit, or quantum model to file. Accepts the same args/kwargs as `display`.
+
+    Arguments:
+        x: `AbstractBlock`, `QuantumCircuit`, or `QuantumModel`.
+        filename: Should end in svg/png.
+        args: Same as in `display`.
+        kwargs: Same as in `display`.
+
+    Examples:
+    ```python exec="on" source="material-block" html="1"
+    from qadence import X, Y, kron
+    from qadence.draw import display
+
+    b = kron(X(0), Y(1))
+    def savefig(*args, **kwargs): return args # markdown-exec: hide
+    savefig(b, "test.svg", size="1,1", theme="dark")
+    ```
+    """
+    make_diagram(x, *args, **kwargs).savefig(filename)
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v1.7.4/api/execution/index.html b/v1.7.4/api/execution/index.html new file mode 100644 index 000000000..6e297b0f7 --- /dev/null +++ b/v1.7.4/api/execution/index.html @@ -0,0 +1,3330 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Execution - Qadence + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Execution

+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ expectation(x, observable, values={}, state=None, backend=BackendName.PYQTORCH, diff_mode=None, noise=None, endianness=Endianness.BIG, configuration=None) + +

+ + +
+ +

Convenience wrapper for the QuantumModel.expectation method.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
x +
+

Circuit, block, or (register+block) to run.

+
+

+ + TYPE: + Union[QuantumCircuit, AbstractBlock, Register, int] + +

+
observable +
+

Observable(s) w.r.t. which the expectation is computed.

+
+

+ + TYPE: + Union[list[AbstractBlock], AbstractBlock] + +

+
values +
+

User-facing parameter dict.

+
+

+ + TYPE: + dict + + + DEFAULT: + {} + +

+
state +
+

Initial state.

+
+

+ + TYPE: + Tensor + + + DEFAULT: + None + +

+
backend +
+

Name of the backend to run on.

+
+

+ + TYPE: + BackendName + + + DEFAULT: + PYQTORCH + +

+
diff_mode +
+

Which differentiation mode to use.

+
+

+ + TYPE: + Union[DiffMode, str, None] + + + DEFAULT: + None + +

+
endianness +
+

The target device endianness.

+
+

+ + TYPE: + Endianness + + + DEFAULT: + BIG + +

+
configuration +
+

The backend configuration.

+
+

+ + TYPE: + Union[BackendConfiguration, dict, None] + + + DEFAULT: + None + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + Tensor + + +
+

A wavefunction

+
+
+
from qadence import RX, Z, Register, QuantumCircuit, expectation
+
+reg = Register(1)
+block = RX(0, 0.5)
+observable = Z(0)
+circ = QuantumCircuit(reg, block)
+
+# You can compute the expectation for a
+# QuantumCircuit with a given observable.
+expectation(circ, observable)
+
+# You can also use only a block.
+# In this case the register is constructed automatically to
+# Register.line(block.n_qubits)
+expectation(block, observable)
+
+# Or a register and block
+expectation(reg, block, observable)
+
+
+ + + +
+ +
+ Source code in qadence/execution.py +
@singledispatch
+def expectation(
+    x: Union[QuantumCircuit, AbstractBlock, Register, int],
+    observable: Union[list[AbstractBlock], AbstractBlock],
+    values: dict = {},
+    state: Tensor = None,
+    backend: BackendName = BackendName.PYQTORCH,
+    diff_mode: Union[DiffMode, str, None] = None,
+    noise: Union[Noise, None] = None,
+    endianness: Endianness = Endianness.BIG,
+    configuration: Union[BackendConfiguration, dict, None] = None,
+) -> Tensor:
+    """Convenience wrapper for the `QuantumModel.expectation` method.
+
+    Arguments:
+        x: Circuit, block, or (register+block) to run.
+        observable: Observable(s) w.r.t. which the expectation is computed.
+        values: User-facing parameter dict.
+        state: Initial state.
+        backend: Name of the backend to run on.
+        diff_mode: Which differentiation mode to use.
+        endianness: The target device endianness.
+        configuration: The backend configuration.
+
+    Returns:
+        A wavefunction
+
+    ```python exec="on" source="material-block"
+    from qadence import RX, Z, Register, QuantumCircuit, expectation
+
+    reg = Register(1)
+    block = RX(0, 0.5)
+    observable = Z(0)
+    circ = QuantumCircuit(reg, block)
+
+    # You can compute the expectation for a
+    # QuantumCircuit with a given observable.
+    expectation(circ, observable)
+
+    # You can also use only a block.
+    # In this case the register is constructed automatically to
+    # Register.line(block.n_qubits)
+    expectation(block, observable)
+
+    # Or a register and block
+    expectation(reg, block, observable)
+    ```
+    """
+
+    raise ValueError(f"Cannot execute {type(x)}")
+
+
+
+ +
+ +
+ + +

+ run(x, *args, values={}, state=None, backend=BackendName.PYQTORCH, endianness=Endianness.BIG, configuration=None) + +

+ + +
+ +

Convenience wrapper for the QuantumModel.run method.

+

This is a +functools.singledispatched function so it can be called with a number of different arguments. +See the examples of the expectation function. This function +works exactly the same.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
x +
+

Circuit, block, or (register+block) to run.

+
+

+ + TYPE: + Union[QuantumCircuit, AbstractBlock, Register, int] + +

+
values +
+

User-facing parameter dict.

+
+

+ + TYPE: + dict + + + DEFAULT: + {} + +

+
state +
+

Initial state.

+
+

+ + TYPE: + Tensor + + + DEFAULT: + None + +

+
backend +
+

Name of the backend to run on.

+
+

+ + TYPE: + BackendName + + + DEFAULT: + PYQTORCH + +

+
endianness +
+

The target device endianness.

+
+

+ + TYPE: + Endianness + + + DEFAULT: + BIG + +

+
configuration +
+

The backend configuration.

+
+

+ + TYPE: + Union[BackendConfiguration, dict, None] + + + DEFAULT: + None + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + Tensor + + +
+

A wavefunction

+
+
+ +
+ Source code in qadence/execution.py +
34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
@singledispatch
+def run(
+    x: Union[QuantumCircuit, AbstractBlock, Register, int],
+    *args: Any,
+    values: dict = {},
+    state: Tensor = None,
+    backend: BackendName = BackendName.PYQTORCH,
+    endianness: Endianness = Endianness.BIG,
+    configuration: Union[BackendConfiguration, dict, None] = None,
+) -> Tensor:
+    """Convenience wrapper for the `QuantumModel.run` method.
+
+     This is a
+    `functools.singledispatch`ed function so it can be called with a number of different arguments.
+    See the examples of the [`expectation`][qadence.execution.expectation] function. This function
+    works exactly the same.
+
+    Arguments:
+        x: Circuit, block, or (register+block) to run.
+        values: User-facing parameter dict.
+        state: Initial state.
+        backend: Name of the backend to run on.
+        endianness: The target device endianness.
+        configuration: The backend configuration.
+
+    Returns:
+        A wavefunction
+    """
+    raise ValueError(f"Cannot run {type(x)}")
+
+
+
+ +
+ +
+ + +

+ sample(x, *args, values={}, state=None, n_shots=100, backend=BackendName.PYQTORCH, endianness=Endianness.BIG, noise=None, configuration=None) + +

+ + +
+ +

Convenience wrapper for the QuantumModel.sample method.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
x +
+

Circuit, block, or (register+block) to run.

+
+

+ + TYPE: + Union[QuantumCircuit, AbstractBlock, Register, int] + +

+
values +
+

User-facing parameter dict.

+
+

+ + TYPE: + dict + + + DEFAULT: + {} + +

+
state +
+

Initial state.

+
+

+ + TYPE: + Union[Tensor, None] + + + DEFAULT: + None + +

+
n_shots +
+

Number of shots per element in the batch.

+
+

+ + TYPE: + int + + + DEFAULT: + 100 + +

+
backend +
+

Name of the backend to run on.

+
+

+ + TYPE: + BackendName + + + DEFAULT: + PYQTORCH + +

+
endianness +
+

The target device endianness.

+
+

+ + TYPE: + Endianness + + + DEFAULT: + BIG + +

+
noise +
+

The noise model to use if any.

+
+

+ + TYPE: + Union[Noise, None] + + + DEFAULT: + None + +

+
configuration +
+

The backend configuration.

+
+

+ + TYPE: + Union[BackendConfiguration, dict, None] + + + DEFAULT: + None + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + list[Counter] + + +
+

A list of Counter instances with the sample results

+
+
+ +
+ Source code in qadence/execution.py +
@singledispatch
+def sample(
+    x: Union[QuantumCircuit, AbstractBlock, Register, int],
+    *args: Any,
+    values: dict = {},
+    state: Union[Tensor, None] = None,
+    n_shots: int = 100,
+    backend: BackendName = BackendName.PYQTORCH,
+    endianness: Endianness = Endianness.BIG,
+    noise: Union[Noise, None] = None,
+    configuration: Union[BackendConfiguration, dict, None] = None,
+) -> list[Counter]:
+    """Convenience wrapper for the `QuantumModel.sample` method.
+
+    Arguments:
+        x: Circuit, block, or (register+block) to run.
+        values: User-facing parameter dict.
+        state: Initial state.
+        n_shots: Number of shots per element in the batch.
+        backend: Name of the backend to run on.
+        endianness: The target device endianness.
+        noise: The noise model to use if any.
+        configuration: The backend configuration.
+
+    Returns:
+        A list of Counter instances with the sample results
+    """
+    raise ValueError(f"Cannot sample from {type(x)}")
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v1.7.4/api/ml_tools/index.html b/v1.7.4/api/ml_tools/index.html new file mode 100644 index 000000000..409bdf193 --- /dev/null +++ b/v1.7.4/api/ml_tools/index.html @@ -0,0 +1,8142 @@ + + + + + + + + + + + + + + + + + + + + + + + + + QML tools - Qadence + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

QML tools

+ +

ML Tools

+

This module implements gradient-free and gradient-based training loops for torch Modules and QuantumModel. It also implements the QNN class.

+ + +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ AnsatzConfig(depth=1, ansatz_type=AnsatzType.HEA, ansatz_strategy=Strategy.DIGITAL, strategy_args=dict(), param_prefix='theta') + + + dataclass + + +

+ + +
+ + + + + +
+ + + + + + + +
+ + + +

+ ansatz_strategy: Strategy = Strategy.DIGITAL + + + class-attribute + instance-attribute + + +

+ + +
+ +

Ansatz strategy.

+

DIGITAL for fully digital ansatz. Required if ansatz_type is iia. +SDAQC for analog entangling block. +RYDBERG for fully rydberg hea ansatz.

+
+ +
+ +
+ + + +

+ ansatz_type: AnsatzType = AnsatzType.HEA + + + class-attribute + instance-attribute + + +

+ + +
+ +

What type of ansatz.

+

HEA for Hardware Efficient Ansatz. +IIA for Identity intialized Ansatz.

+
+ +
+ +
+ + + +

+ depth: int = 1 + + + class-attribute + instance-attribute + + +

+ + +
+ +

Number of layers of the ansatz.

+
+ +
+ +
+ + + +

+ param_prefix: str = 'theta' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The base bame of the variational parameter.

+
+ +
+ +
+ + + +

+ strategy_args: dict = field(default_factory=dict) + + + class-attribute + instance-attribute + + +

+ + +
+ +

A dictionary containing keyword arguments to the function creating the ansatz.

+

Details about each below.

+

For DIGITAL strategy, accepts the following: + periodic (bool): if the qubits should be linked periodically. + periodic=False is not supported in emu-c. + operations (list): list of operations to cycle through in the + digital single-qubit rotations of each layer. + Defaults to [RX, RY, RX] for hea and [RX, RY] for iia. + entangler (AbstractBlock): 2-qubit entangling operation. + Supports CNOT, CZ, CRX, CRY, CRZ, CPHASE. Controlld rotations + will have variational parameters on the rotation angles. + Defaults to CNOT

+

For SDAQC strategy, accepts the following: + operations (list): list of operations to cycle through in the + digital single-qubit rotations of each layer. + Defaults to [RX, RY, RX] for hea and [RX, RY] for iia. + entangler (AbstractBlock): Hamiltonian generator for the + analog entangling layer. Time parameter is considered variational. + Defaults to NN interaction.

+

For RYDBERG strategy, accepts the following: + addressable_detuning: whether to turn on the trainable semi-local addressing pattern + on the detuning (n_i terms in the Hamiltonian). + Defaults to True. + addressable_drive: whether to turn on the trainable semi-local addressing pattern + on the drive (sigma_i^x terms in the Hamiltonian). + Defaults to False. + tunable_phase: whether to have a tunable phase to get both sigma^x and sigma^y rotations + in the drive term. If False, only a sigma^x term will be included in the drive part + of the Hamiltonian generator. + Defaults to False.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ FeatureMapConfig(num_features=0, basis_set=BasisSet.FOURIER, reupload_scaling=ReuploadScaling.CONSTANT, feature_range=None, target_range=None, multivariate_strategy=MultivariateStrategy.PARALLEL, feature_map_strategy=Strategy.DIGITAL, param_prefix=None, num_repeats=0, operation=None, inputs=None) + + + dataclass + + +

+ + +
+ + + + + +
+ + + + + + + +
+ + + +

+ basis_set: BasisSet | dict[str, BasisSet] = BasisSet.FOURIER + + + class-attribute + instance-attribute + + +

+ + +
+ +

Basis set for feature encoding.

+

Takes qadence.BasisSet. +Give a single BasisSet to use the same for all features. +Give a dict of (str, BasisSet) where the key is the name of the variable and the +value is the BasisSet to use for encoding that feature. +BasisSet.FOURIER for Fourier encoding. +BasisSet.CHEBYSHEV for Chebyshev encoding.

+
+ +
+ +
+ + + +

+ feature_map_strategy: Strategy = Strategy.DIGITAL + + + class-attribute + instance-attribute + + +

+ + +
+ +

Strategy for feature map.

+

Accepts DIGITAL, ANALOG or RYDBERG. Defaults to DIGITAL. +If the strategy is incompatible with the operation chosen, then operation +gets preference and the given strategy is ignored.

+
+ +
+ +
+ + + +

+ feature_range: tuple[float, float] | dict[str, tuple[float, float]] | None = None + + + class-attribute + instance-attribute + + +

+ + +
+ +

Range of data that the input data is assumed to come from.

+

Give a single tuple to use the same range for all features. +Give a dict of (str, tuple) where the key is the name of the variable and the +value is the feature range to use for that feature.

+
+ +
+ +
+ + + +

+ inputs: list[Basic | str] | None = None + + + class-attribute + instance-attribute + + +

+ + +
+ +

List that indicates the order of variables of the tensors that are passed.

+

Optional if a single feature is being encoded, required otherwise. Given input tensors +xs = torch.rand(batch_size, input_size:=2) a QNN with inputs=["t", "x"] will +assign t, x = xs[:,0], xs[:,1].

+
+ +
+ +
+ + + +

+ multivariate_strategy: MultivariateStrategy = MultivariateStrategy.PARALLEL + + + class-attribute + instance-attribute + + +

+ + +
+ +

The encoding strategy in case of multi-variate function.

+

Takes qadence.MultivariateStrategy. +If PARALLEL, the features are encoded in one block of rotation gates +with each feature given an equal number of qubits. +If SERIES, the features are encoded sequentially, with an ansatz block +between. PARALLEL is allowed only for DIGITAL feature_map_strategy.

+
+ +
+ +
+ + + +

+ num_features: int = 0 + + + class-attribute + instance-attribute + + +

+ + +
+ +

Number of feature parameters to be encoded.

+

Defaults to 0. Thus, no feature parameters are encoded.

+
+ +
+ +
+ + + +

+ num_repeats: int | dict[str, int] = 0 + + + class-attribute + instance-attribute + + +

+ + +
+ +

Number of feature map layers repeated in the data reuploading step.

+

If all are to be repeated the same number of times, then can give a single +int. For different number of repetitions for each feature, provide a dict +of (str, int) where the key is the name of the variable and the value is the +number of repetitions for that feature. +This amounts to the number of additional reuploads. So if num_repeats is N, +the data gets uploaded N+1 times. Defaults to no repetition.

+
+ +
+ +
+ + + +

+ operation: Callable[[Parameter | Basic], AnalogBlock] | Type[RX] | None = None + + + class-attribute + instance-attribute + + +

+ + +
+ +

Type of operation.

+

Choose among the analog or digital rotations or a custom +callable function returning an AnalogBlock instance. If the type of operation is +incompatible with the strategy chosen, then operation gets preference and +the given strategy is ignored.

+
+ +
+ +
+ + + +

+ param_prefix: str | None = None + + + class-attribute + instance-attribute + + +

+ + +
+ +

String prefix to create trainable parameters in Feature Map.

+

A string prefix to create trainable parameters multiplying the feature parameter +inside the feature-encoding function. Note that currently this does not take into +account the domain of the feature-encoding function. +Defaults to None and thus, the feature map is not trainable. +Note that this is separate from the name of the parameter. +The user can provide a single prefix for all features, and they will be appended +by appropriate feature name automatically.

+
+ +
+ +
+ + + +

+ reupload_scaling: ReuploadScaling | dict[str, ReuploadScaling] = ReuploadScaling.CONSTANT + + + class-attribute + instance-attribute + + +

+ + +
+ +

Scaling for encoding the same feature on different qubits.

+

Scaling used to encode the same feature on different qubits in the +same layer of the feature maps. Takes qadence.ReuploadScaling. +Give a single ReuploadScaling to use the same for all features. +Give a dict of (str, ReuploadScaling) where the key is the name of the variable and the +value is the ReuploadScaling to use for encoding that feature. +ReuploadScaling.CONSTANT for constant scaling. +ReuploadScaling.TOWER for linearly increasing scaling. +ReuploadScaling.EXP for exponentially increasing scaling.

+
+ +
+ +
+ + + +

+ target_range: tuple[float, float] | dict[str, tuple[float, float]] | None = None + + + class-attribute + instance-attribute + + +

+ + +
+ +

Range of data the data encoder assumes as natural range.

+

Give a single tuple to use the same range for all features. +Give a dict of (str, tuple) where the key is the name of the variable and the +value is the target range to use for that feature.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ MLFlowConfig() + +

+ + +
+ + +

Configuration for mlflow tracking.

+

Example:

+
export MLFLOW_TRACKING_URI=tracking_uri
+export MLFLOW_EXPERIMENT=experiment_name
+export MLFLOW_RUN_NAME=run_name
+
+ +
+ Source code in qadence/ml_tools/config.py +
def __init__(self) -> None:
+    import mlflow
+
+    self.tracking_uri: str = os.getenv("MLFLOW_TRACKING_URI", "")
+    """The URI of the mlflow tracking server.
+
+    An empty string, or a local file path, prefixed with file:/.
+    Data is stored locally at the provided file (or ./mlruns if empty).
+    """
+
+    self.experiment_name: str = os.getenv("MLFLOW_EXPERIMENT", str(uuid4()))
+    """The name of the experiment.
+
+    If None or empty, a new experiment is created with a random UUID.
+    """
+
+    self.run_name: str = os.getenv("MLFLOW_RUN_NAME", str(uuid4()))
+    """The name of the run."""
+
+    mlflow.set_tracking_uri(self.tracking_uri)
+
+    # activate existing or create experiment
+    exp_filter_string = f"name = '{self.experiment_name}'"
+    if not mlflow.search_experiments(filter_string=exp_filter_string):
+        mlflow.create_experiment(name=self.experiment_name)
+
+    self.experiment = mlflow.set_experiment(self.experiment_name)
+    self.run = mlflow.start_run(run_name=self.run_name, nested=False)
+
+
+ + + +
+ + + + + + + +
+ + + +

+ experiment_name: str = os.getenv('MLFLOW_EXPERIMENT', str(uuid4())) + + + instance-attribute + + +

+ + +
+ +

The name of the experiment.

+

If None or empty, a new experiment is created with a random UUID.

+
+ +
+ +
+ + + +

+ run_name: str = os.getenv('MLFLOW_RUN_NAME', str(uuid4())) + + + instance-attribute + + +

+ + +
+ +

The name of the run.

+
+ +
+ +
+ + + +

+ tracking_uri: str = os.getenv('MLFLOW_TRACKING_URI', '') + + + instance-attribute + + +

+ + +
+ +

The URI of the mlflow tracking server.

+

An empty string, or a local file path, prefixed with file:/. +Data is stored locally at the provided file (or ./mlruns if empty).

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ TrainConfig(max_iter=10000, print_every=1000, write_every=50, checkpoint_every=5000, plot_every=5000, log_model=False, folder=None, create_subfolder_per_run=False, checkpoint_best_only=False, val_every=None, val_epsilon=1e-05, validation_criterion=None, trainstop_criterion=None, batch_size=1, verbose=True, tracking_tool=ExperimentTrackingTool.TENSORBOARD, hyperparams=dict(), plotting_functions=tuple()) + + + dataclass + + +

+ + +
+ + +

Default config for the train function.

+

The default value of +each field can be customized with the constructor:

+
from qadence.ml_tools import TrainConfig
+c = TrainConfig(folder="/tmp/train")
+
+
+ +
TrainConfig(max_iter=10000, print_every=1000, write_every=50, checkpoint_every=5000, plot_every=5000, log_model=False, folder=PosixPath('/tmp/train'), create_subfolder_per_run=False, checkpoint_best_only=False, val_every=None, val_epsilon=1e-05, validation_criterion=<function TrainConfig.__post_init__.<locals>.<lambda> at 0x7f9d83aa1900>, trainstop_criterion=<function TrainConfig.__post_init__.<locals>.<lambda> at 0x7f9d83aa1870>, batch_size=1, verbose=True, tracking_tool=<ExperimentTrackingTool.TENSORBOARD: 'tensorboard'>, hyperparams={}, plotting_functions=())
+
+ +
+ + + + +
+ + + + + + + +
+ + + +

+ batch_size: int = 1 + + + class-attribute + instance-attribute + + +

+ + +
+ +

The batch_size to use when passing a list/tuple of torch.Tensors.

+
+ +
+ +
+ + + +

+ checkpoint_best_only: bool = False + + + class-attribute + instance-attribute + + +

+ + +
+ +

Write model/optimizer checkpoint only if a metric has improved.

+
+ +
+ +
+ + + +

+ checkpoint_every: int = 5000 + + + class-attribute + instance-attribute + + +

+ + +
+ +

Write model/optimizer checkpoint.

+
+ +
+ +
+ + + +

+ create_subfolder_per_run: bool = False + + + class-attribute + instance-attribute + + +

+ + +
+ +

Checkpoint/tensorboard logs stored in subfolder with name <timestamp>_<PID>.

+

Prevents continuing from previous checkpoint, useful for fast prototyping.

+
+ +
+ +
+ + + +

+ folder: Path | None = None + + + class-attribute + instance-attribute + + +

+ + +
+ +

Checkpoint/tensorboard logs folder.

+
+ +
+ +
+ + + +

+ hyperparams: dict = field(default_factory=dict) + + + class-attribute + instance-attribute + + +

+ + +
+ +

Hyperparameters to track.

+
+ +
+ +
+ + + +

+ log_model: bool = False + + + class-attribute + instance-attribute + + +

+ + +
+ +

Logs a serialised version of the model.

+
+ +
+ +
+ + + +

+ max_iter: int = 10000 + + + class-attribute + instance-attribute + + +

+ + +
+ +

Number of training iterations.

+
+ +
+ +
+ + + +

+ plot_every: int = 5000 + + + class-attribute + instance-attribute + + +

+ + +
+ +

Write figures.

+
+ +
+ +
+ + + +

+ plotting_functions: tuple[LoggablePlotFunction, ...] = field(default_factory=tuple) + + + class-attribute + instance-attribute + + +

+ + +
+ +

Functions for in-train plotting.

+
+ +
+ +
+ + + +

+ print_every: int = 1000 + + + class-attribute + instance-attribute + + +

+ + +
+ +

Print loss/metrics.

+
+ +
+ +
+ + + +

+ tracking_tool: ExperimentTrackingTool = ExperimentTrackingTool.TENSORBOARD + + + class-attribute + instance-attribute + + +

+ + +
+ +

The tracking tool of choice.

+
+ +
+ +
+ + + +

+ trainstop_criterion: Callable | None = None + + + class-attribute + instance-attribute + + +

+ + +
+ +

A boolean function which evaluates a given training stopping metric is satisfied.

+
+ +
+ +
+ + + +

+ val_epsilon: float = 1e-05 + + + class-attribute + instance-attribute + + +

+ + +
+ +

Safety margin to check if validation loss is smaller than the lowest.

+

validation loss across previous iterations.

+
+ +
+ +
+ + + +

+ val_every: int | None = None + + + class-attribute + instance-attribute + + +

+ + +
+ +

Calculate validation metric.

+

If None, validation check is not performed.

+
+ +
+ +
+ + + +

+ validation_criterion: Callable | None = None + + + class-attribute + instance-attribute + + +

+ + +
+ +

A boolean function which evaluates a given validation metric is satisfied.

+
+ +
+ +
+ + + +

+ verbose: bool = True + + + class-attribute + instance-attribute + + +

+ + +
+ +

Whether or not to print out metrics values during training.

+
+ +
+ +
+ + + +

+ write_every: int = 50 + + + class-attribute + instance-attribute + + +

+ + +
+ +

Write loss and metrics with the tracking tool.

+
+ +
+ + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ get_parameters(model) + +

+ + +
+ +

Retrieve all trainable model parameters in a single vector.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
model +
+

the input PyTorch model

+
+

+ + TYPE: + Module + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ Tensor + +
+

a 1-dimensional tensor with the parameters

+
+

+ + TYPE: + Tensor + +

+
+ +
+ Source code in qadence/ml_tools/parameters.py +
 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
def get_parameters(model: Module) -> Tensor:
+    """Retrieve all trainable model parameters in a single vector.
+
+    Args:
+        model (Module): the input PyTorch model
+
+    Returns:
+        Tensor: a 1-dimensional tensor with the parameters
+    """
+    ps = [p.reshape(-1) for p in model.parameters() if p.requires_grad]
+    return torch.concat(ps)
+
+
+
+ +
+ +
+ + +

+ num_parameters(model) + +

+ + +
+ +

Return the total number of parameters of the given model.

+ +
+ Source code in qadence/ml_tools/parameters.py +
44
+45
+46
def num_parameters(model: Module) -> int:
+    """Return the total number of parameters of the given model."""
+    return len(get_parameters(model))
+
+
+
+ +
+ +
+ + +

+ set_parameters(model, theta) + +

+ + +
+ +

Set all trainable parameters of a model from a single vector.

+

Notice that this function assumes prior knowledge of right number +of parameters in the model

+ + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
model +
+

the input PyTorch model

+
+

+ + TYPE: + Module + +

+
theta +
+

the parameters to assign

+
+

+ + TYPE: + Tensor + +

+
+ +
+ Source code in qadence/ml_tools/parameters.py +
21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
def set_parameters(model: Module, theta: Tensor) -> None:
+    """Set all trainable parameters of a model from a single vector.
+
+    Notice that this function assumes prior knowledge of right number
+    of parameters in the model
+
+    Args:
+        model (Module): the input PyTorch model
+        theta (Tensor): the parameters to assign
+    """
+
+    with torch.no_grad():
+        idx = 0
+        for ps in model.parameters():
+            if ps.requires_grad:
+                n = torch.numel(ps)
+                if ps.ndim == 0:
+                    ps[()] = theta[idx : idx + n]
+                else:
+                    ps[:] = theta[idx : idx + n].reshape(ps.size())
+                idx += n
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ optimize_step(model, optimizer, loss_fn, xs, device=None, dtype=None) + +

+ + +
+ +

Default Torch optimize step with closure.

+

This is the default optimization step which should work for most +of the standard use cases of optimization of Torch models

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
model +
+

The input model

+
+

+ + TYPE: + Module + +

+
optimizer +
+

The chosen Torch optimizer

+
+

+ + TYPE: + Optimizer + +

+
loss_fn +
+

A custom loss function

+
+

+ + TYPE: + Callable + +

+
xs +
+

the input data. If None it means +that the given model does not require any input data

+
+

+ + TYPE: + dict | list | Tensor | None + +

+
device +
+

A target device to run computation on.

+
+

+ + TYPE: + device + + + DEFAULT: + None + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ tuple + +
+

tuple containing the model, the optimizer, a dictionary with +the collected metrics and the compute value loss

+
+

+ + TYPE: + tuple[Tensor | float, dict | None] + +

+
+ +
+ Source code in qadence/ml_tools/optimize_step.py +
12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
def optimize_step(
+    model: Module,
+    optimizer: Optimizer,
+    loss_fn: Callable,
+    xs: dict | list | torch.Tensor | None,
+    device: torch.device = None,
+    dtype: torch.dtype = None,
+) -> tuple[torch.Tensor | float, dict | None]:
+    """Default Torch optimize step with closure.
+
+    This is the default optimization step which should work for most
+    of the standard use cases of optimization of Torch models
+
+    Args:
+        model (Module): The input model
+        optimizer (Optimizer): The chosen Torch optimizer
+        loss_fn (Callable): A custom loss function
+        xs (dict | list | torch.Tensor | None): the input data. If None it means
+            that the given model does not require any input data
+        device (torch.device): A target device to run computation on.
+
+    Returns:
+        tuple: tuple containing the model, the optimizer, a dictionary with
+            the collected metrics and the compute value loss
+    """
+
+    loss, metrics = None, {}
+    xs_to_device = data_to_device(xs, device=device, dtype=dtype)
+
+    def closure() -> Any:
+        # NOTE: We need the nonlocal as we can't return a metric dict and
+        # because e.g. LBFGS calls this closure multiple times but for some
+        # reason the returned loss is always the first one...
+        nonlocal metrics, loss
+        optimizer.zero_grad()
+        loss, metrics = loss_fn(model, xs_to_device)
+        loss.backward(retain_graph=True)
+        return loss.item()
+
+    optimizer.step(closure)
+    # return the loss/metrics that are being mutated inside the closure...
+    return loss, metrics
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ train(model, dataloader, optimizer, config, loss_fn, device=None, optimize_step=optimize_step, dtype=None) + +

+ + +
+ +

Runs the training loop with gradient-based optimizer.

+

Assumes that loss_fn returns a tuple of (loss, +metrics: dict), where metrics is a dict of scalars. Loss and metrics are +written to tensorboard. Checkpoints are written every +config.checkpoint_every steps (and after the last training step). If a +checkpoint is found at config.folder we resume training from there. The +tensorboard logs can be viewed via tensorboard --logdir /path/to/folder.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
model +
+

The model to train.

+
+

+ + TYPE: + Module + +

+
dataloader +
+

dataloader of different types. If None, no data is required by +the model

+
+

+ + TYPE: + Union[None, DataLoader, DictDataLoader] + +

+
optimizer +
+

The optimizer to use.

+
+

+ + TYPE: + Optimizer + +

+
config +
+

TrainConfig with additional training options.

+
+

+ + TYPE: + TrainConfig + +

+
loss_fn +
+

Loss function returning (loss: float, metrics: dict[str, float], ...)

+
+

+ + TYPE: + Callable + +

+
device +
+

String defining device to train on, pass 'cuda' for GPU.

+
+

+ + TYPE: + device + + + DEFAULT: + None + +

+
optimize_step +
+

Customizable optimization callback which is called at every iteration.= +The function must have the signature optimize_step(model, +optimizer, loss_fn, xs, device="cpu").

+
+

+ + TYPE: + Callable + + + DEFAULT: + optimize_step + +

+
dtype +
+

The dtype to use for the data.

+
+

+ + TYPE: + dtype + + + DEFAULT: + None + +

+
+

Example: +

from pathlib import Path
+import torch
+from itertools import count
+from qadence import Parameter, QuantumCircuit, Z
+from qadence import hamiltonian_factory, hea, feature_map, chain
+from qadence import QNN
+from qadence.ml_tools import TrainConfig, train_with_grad, to_dataloader
+
+n_qubits = 2
+fm = feature_map(n_qubits)
+ansatz = hea(n_qubits=n_qubits, depth=3)
+observable = hamiltonian_factory(n_qubits, detuning = Z)
+circuit = QuantumCircuit(n_qubits, fm, ansatz)
+
+model = QNN(circuit, observable, backend="pyqtorch", diff_mode="ad")
+batch_size = 1
+input_values = {"phi": torch.rand(batch_size, requires_grad=True)}
+pred = model(input_values)
+
+## lets prepare the train routine
+
+cnt = count()
+criterion = torch.nn.MSELoss()
+optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
+
+def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]:
+    next(cnt)
+    x, y = data[0], data[1]
+    out = model(x)
+    loss = criterion(out, y)
+    return loss, {}
+
+tmp_path = Path("/tmp")
+n_epochs = 5
+batch_size = 25
+config = TrainConfig(
+    folder=tmp_path,
+    max_iter=n_epochs,
+    checkpoint_every=100,
+    write_every=100,
+)
+x = torch.linspace(0, 1, batch_size).reshape(-1, 1)
+y = torch.sin(x)
+data = to_dataloader(x, y, batch_size=batch_size, infinite=True)
+train_with_grad(model, data, optimizer, config, loss_fn=loss_fn)
+
+
+ + + +

+ +
+ Source code in qadence/ml_tools/train_grad.py +
def train(
+    model: Module,
+    dataloader: Union[None, DataLoader, DictDataLoader],
+    optimizer: Optimizer,
+    config: TrainConfig,
+    loss_fn: Callable,
+    device: torch_device = None,
+    optimize_step: Callable = optimize_step,
+    dtype: torch_dtype = None,
+) -> tuple[Module, Optimizer]:
+    """Runs the training loop with gradient-based optimizer.
+
+    Assumes that `loss_fn` returns a tuple of (loss,
+    metrics: dict), where `metrics` is a dict of scalars. Loss and metrics are
+    written to tensorboard. Checkpoints are written every
+    `config.checkpoint_every` steps (and after the last training step).  If a
+    checkpoint is found at `config.folder` we resume training from there.  The
+    tensorboard logs can be viewed via `tensorboard --logdir /path/to/folder`.
+
+    Args:
+        model: The model to train.
+        dataloader: dataloader of different types. If None, no data is required by
+            the model
+        optimizer: The optimizer to use.
+        config: `TrainConfig` with additional training options.
+        loss_fn: Loss function returning (loss: float, metrics: dict[str, float], ...)
+        device: String defining device to train on, pass 'cuda' for GPU.
+        optimize_step: Customizable optimization callback which is called at every iteration.=
+            The function must have the signature `optimize_step(model,
+            optimizer, loss_fn, xs, device="cpu")`.
+        dtype: The dtype to use for the data.
+
+    Example:
+    ```python exec="on" source="material-block"
+    from pathlib import Path
+    import torch
+    from itertools import count
+    from qadence import Parameter, QuantumCircuit, Z
+    from qadence import hamiltonian_factory, hea, feature_map, chain
+    from qadence import QNN
+    from qadence.ml_tools import TrainConfig, train_with_grad, to_dataloader
+
+    n_qubits = 2
+    fm = feature_map(n_qubits)
+    ansatz = hea(n_qubits=n_qubits, depth=3)
+    observable = hamiltonian_factory(n_qubits, detuning = Z)
+    circuit = QuantumCircuit(n_qubits, fm, ansatz)
+
+    model = QNN(circuit, observable, backend="pyqtorch", diff_mode="ad")
+    batch_size = 1
+    input_values = {"phi": torch.rand(batch_size, requires_grad=True)}
+    pred = model(input_values)
+
+    ## lets prepare the train routine
+
+    cnt = count()
+    criterion = torch.nn.MSELoss()
+    optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
+
+    def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]:
+        next(cnt)
+        x, y = data[0], data[1]
+        out = model(x)
+        loss = criterion(out, y)
+        return loss, {}
+
+    tmp_path = Path("/tmp")
+    n_epochs = 5
+    batch_size = 25
+    config = TrainConfig(
+        folder=tmp_path,
+        max_iter=n_epochs,
+        checkpoint_every=100,
+        write_every=100,
+    )
+    x = torch.linspace(0, 1, batch_size).reshape(-1, 1)
+    y = torch.sin(x)
+    data = to_dataloader(x, y, batch_size=batch_size, infinite=True)
+    train_with_grad(model, data, optimizer, config, loss_fn=loss_fn)
+    ```
+    """
+    # load available checkpoint
+    init_iter = 0
+    log_device = "cpu" if device is None else device
+    if config.folder:
+        model, optimizer, init_iter = load_checkpoint(
+            config.folder, model, optimizer, device=log_device
+        )
+        logger.debug(f"Loaded model and optimizer from {config.folder}")
+
+    # Move model to device before optimizer is loaded
+    if isinstance(model, DataParallel):
+        model = model.module.to(device=device, dtype=dtype)
+    else:
+        model = model.to(device=device, dtype=dtype)
+    # initialize tracking tool
+    if config.tracking_tool == ExperimentTrackingTool.TENSORBOARD:
+        writer = SummaryWriter(config.folder, purge_step=init_iter)
+    else:
+        writer = importlib.import_module("mlflow")
+
+    perform_val = isinstance(config.val_every, int)
+    if perform_val:
+        if not isinstance(dataloader, DictDataLoader):
+            raise ValueError(
+                "If `config.val_every` is provided as an integer, dataloader must"
+                "be an instance of `DictDataLoader`."
+            )
+        iter_keys = dataloader.dataloaders.keys()
+        if "train" not in iter_keys or "val" not in iter_keys:
+            raise ValueError(
+                "If `config.val_every` is provided as an integer, the dictdataloader"
+                "must have `train` and `val` keys to access the respective dataloaders."
+            )
+        val_dataloader = dataloader.dataloaders["val"]
+        dataloader = dataloader.dataloaders["train"]
+
+    ## Training
+    progress = Progress(
+        TextColumn("[progress.description]{task.description}"),
+        BarColumn(),
+        TaskProgressColumn(),
+        TimeRemainingColumn(elapsed_when_finished=True),
+    )
+    data_dtype = None
+    if dtype:
+        data_dtype = float64 if dtype == complex128 else float32
+
+    best_val_loss = math.inf
+
+    with progress:
+        dl_iter = iter(dataloader) if dataloader is not None else None
+
+        # Initial validation evaluation
+        try:
+            if perform_val:
+                dl_iter_val = iter(val_dataloader) if val_dataloader is not None else None
+                xs = next(dl_iter_val)
+                xs_to_device = data_to_device(xs, device=device, dtype=data_dtype)
+                best_val_loss, metrics = loss_fn(model, xs_to_device)
+
+                metrics["val_loss"] = best_val_loss
+                write_tracker(writer, None, metrics, init_iter, tracking_tool=config.tracking_tool)
+
+            if config.folder:
+                if config.checkpoint_best_only:
+                    write_checkpoint(config.folder, model, optimizer, iteration="best")
+                else:
+                    write_checkpoint(config.folder, model, optimizer, init_iter)
+
+            plot_tracker(
+                writer,
+                model,
+                init_iter,
+                config.plotting_functions,
+                tracking_tool=config.tracking_tool,
+            )
+
+        except KeyboardInterrupt:
+            logger.info("Terminating training gracefully after the current iteration.")
+
+        # outer epoch loop
+        init_iter += 1
+        for iteration in progress.track(range(init_iter, init_iter + config.max_iter)):
+            try:
+                # in case there is not data needed by the model
+                # this is the case, for example, of quantum models
+                # which do not have classical input data (e.g. chemistry)
+                if dataloader is None:
+                    loss, metrics = optimize_step(
+                        model=model,
+                        optimizer=optimizer,
+                        loss_fn=loss_fn,
+                        xs=None,
+                        device=device,
+                        dtype=data_dtype,
+                    )
+                    loss = loss.item()
+
+                elif isinstance(dataloader, (DictDataLoader, DataLoader)):
+                    loss, metrics = optimize_step(
+                        model=model,
+                        optimizer=optimizer,
+                        loss_fn=loss_fn,
+                        xs=next(dl_iter),  # type: ignore[arg-type]
+                        device=device,
+                        dtype=data_dtype,
+                    )
+
+                else:
+                    raise NotImplementedError(
+                        f"Unsupported dataloader type: {type(dataloader)}. "
+                        "You can use e.g. `qadence.ml_tools.to_dataloader` to build a dataloader."
+                    )
+
+                if iteration % config.print_every == 0 and config.verbose:
+                    # Note that the loss returned by optimize_step
+                    # is the value before doing the training step
+                    # which is printed accordingly by the previous iteration number
+                    print_metrics(loss, metrics, iteration - 1)
+
+                if iteration % config.write_every == 0:
+                    write_tracker(
+                        writer, loss, metrics, iteration, tracking_tool=config.tracking_tool
+                    )
+
+                if iteration % config.plot_every == 0:
+                    plot_tracker(
+                        writer,
+                        model,
+                        iteration,
+                        config.plotting_functions,
+                        tracking_tool=config.tracking_tool,
+                    )
+                if perform_val:
+                    if iteration % config.val_every == 0:
+                        xs = next(dl_iter_val)
+                        xs_to_device = data_to_device(xs, device=device, dtype=data_dtype)
+                        val_loss, *_ = loss_fn(model, xs_to_device)
+                        if config.validation_criterion(val_loss, best_val_loss, config.val_epsilon):  # type: ignore[misc]
+                            best_val_loss = val_loss
+                            if config.folder and config.checkpoint_best_only:
+                                write_checkpoint(config.folder, model, optimizer, iteration="best")
+                            metrics["val_loss"] = val_loss
+                            write_tracker(
+                                writer, loss, metrics, iteration, tracking_tool=config.tracking_tool
+                            )
+
+                if config.folder:
+                    if iteration % config.checkpoint_every == 0 and not config.checkpoint_best_only:
+                        write_checkpoint(config.folder, model, optimizer, iteration)
+
+            except KeyboardInterrupt:
+                logger.info("Terminating training gracefully after the current iteration.")
+                break
+
+        # Handling printing the last training loss
+        # as optimize_step does not give the loss value at the last iteration
+        try:
+            xs = next(dl_iter) if dataloader is not None else None  # type: ignore[arg-type]
+            xs_to_device = data_to_device(xs, device=device, dtype=data_dtype)
+            loss, metrics, *_ = loss_fn(model, xs_to_device)
+            if dataloader is None:
+                loss = loss.item()
+            if iteration % config.print_every == 0 and config.verbose:
+                print_metrics(loss, metrics, iteration)
+
+        except KeyboardInterrupt:
+            logger.info("Terminating training gracefully after the current iteration.")
+
+    # Final checkpointing and writing
+    if config.folder and not config.checkpoint_best_only:
+        write_checkpoint(config.folder, model, optimizer, iteration)
+    write_tracker(writer, loss, metrics, iteration, tracking_tool=config.tracking_tool)
+
+    # writing hyperparameters
+    if config.hyperparams:
+        log_tracker(writer, config.hyperparams, metrics, tracking_tool=config.tracking_tool)
+
+    # logging the model
+    if config.log_model:
+        log_model_tracker(writer, model, dataloader, tracking_tool=config.tracking_tool)
+
+    # close tracker
+    if config.tracking_tool == ExperimentTrackingTool.TENSORBOARD:
+        writer.close()
+    elif config.tracking_tool == ExperimentTrackingTool.MLFLOW:
+        writer.end_run()
+
+    return model, optimizer
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ train(model, dataloader, optimizer, config, loss_fn) + +

+ + +
+ +

Runs the training loop with a gradient-free optimizer.

+

Assumes that loss_fn returns a tuple of (loss, metrics: dict), where +metrics is a dict of scalars. Loss and metrics are written to +tensorboard. Checkpoints are written every config.checkpoint_every steps +(and after the last training step). If a checkpoint is found at config.folder +we resume training from there. The tensorboard logs can be viewed via +tensorboard --logdir /path/to/folder.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
model +
+

The model to train

+
+

+ + TYPE: + Module + +

+
dataloader +
+

Dataloader constructed via dictdataloader

+
+

+ + TYPE: + DictDataLoader | DataLoader | None + +

+
optimizer +
+

The optimizer to use taken from the Nevergrad library. If this is not +the case the function will raise an AssertionError

+
+

+ + TYPE: + Optimizer + +

+
config +
+

TrainConfig with additional training options.

+
+

+ + TYPE: + TrainConfig + +

+
loss_fn +
+

Loss function returning (loss: float, metrics: dict[str, float])

+
+

+ + TYPE: + Callable + +

+
+ +
+ Source code in qadence/ml_tools/train_no_grad.py +
def train(
+    model: Module,
+    dataloader: DictDataLoader | DataLoader | None,
+    optimizer: NGOptimizer,
+    config: TrainConfig,
+    loss_fn: Callable,
+) -> tuple[Module, NGOptimizer]:
+    """Runs the training loop with a gradient-free optimizer.
+
+    Assumes that `loss_fn` returns a tuple of (loss, metrics: dict), where
+    `metrics` is a dict of scalars. Loss and metrics are written to
+    tensorboard. Checkpoints are written every `config.checkpoint_every` steps
+    (and after the last training step).  If a checkpoint is found at `config.folder`
+    we resume training from there.  The tensorboard logs can be viewed via
+    `tensorboard --logdir /path/to/folder`.
+
+    Args:
+        model: The model to train
+        dataloader: Dataloader constructed via `dictdataloader`
+        optimizer: The optimizer to use taken from the Nevergrad library. If this is not
+            the case the function will raise an AssertionError
+        config: `TrainConfig` with additional training options.
+        loss_fn: Loss function returning (loss: float, metrics: dict[str, float])
+    """
+    init_iter = 0
+    if config.folder:
+        model, optimizer, init_iter = load_checkpoint(config.folder, model, optimizer)
+        logger.debug(f"Loaded model and optimizer from {config.folder}")
+
+    def _update_parameters(
+        data: Tensor | None, ng_params: ng.p.Array
+    ) -> tuple[float, dict, ng.p.Array]:
+        loss, metrics = loss_fn(model, data)  # type: ignore[misc]
+        optimizer.tell(ng_params, float(loss))
+        ng_params = optimizer.ask()  # type: ignore [assignment]
+        params = promote_to_tensor(ng_params.value, requires_grad=False)
+        set_parameters(model, params)
+        return loss, metrics, ng_params
+
+    assert loss_fn is not None, "Provide a valid loss function"
+    # TODO: support also Scipy optimizers
+    assert isinstance(optimizer, NGOptimizer), "Use only optimizers from the Nevergrad library"
+
+    # initialize tracking tool
+    if config.tracking_tool == ExperimentTrackingTool.TENSORBOARD:
+        writer = SummaryWriter(config.folder, purge_step=init_iter)
+    else:
+        writer = importlib.import_module("mlflow")
+
+    # set optimizer configuration and initial parameters
+    optimizer.budget = config.max_iter
+    optimizer.enable_pickling()
+
+    # TODO: Make it GPU compatible if possible
+    params = get_parameters(model).detach().numpy()
+    ng_params = ng.p.Array(init=params)
+
+    # serial training
+    # TODO: Add a parallelization using the num_workers argument in Nevergrad
+    progress = Progress(
+        TextColumn("[progress.description]{task.description}"),
+        BarColumn(),
+        TaskProgressColumn(),
+        TimeRemainingColumn(elapsed_when_finished=True),
+    )
+    with progress:
+        dl_iter = iter(dataloader) if dataloader is not None else None
+
+        for iteration in progress.track(range(init_iter, init_iter + config.max_iter)):
+            if dataloader is None:
+                loss, metrics, ng_params = _update_parameters(None, ng_params)
+
+            elif isinstance(dataloader, (DictDataLoader, DataLoader)):
+                data = next(dl_iter)  # type: ignore[arg-type]
+                loss, metrics, ng_params = _update_parameters(data, ng_params)
+
+            else:
+                raise NotImplementedError("Unsupported dataloader type!")
+
+            if iteration % config.print_every == 0 and config.verbose:
+                print_metrics(loss, metrics, iteration)
+
+            if iteration % config.write_every == 0:
+                write_tracker(writer, loss, metrics, iteration, tracking_tool=config.tracking_tool)
+
+            if iteration % config.plot_every == 0:
+                plot_tracker(
+                    writer,
+                    model,
+                    iteration,
+                    config.plotting_functions,
+                    tracking_tool=config.tracking_tool,
+                )
+
+            if config.folder:
+                if iteration % config.checkpoint_every == 0:
+                    write_checkpoint(config.folder, model, optimizer, iteration)
+
+            if iteration >= init_iter + config.max_iter:
+                break
+
+    # writing hyperparameters
+    if config.hyperparams:
+        log_tracker(writer, config.hyperparams, metrics, tracking_tool=config.tracking_tool)
+
+    if config.log_model:
+        log_model_tracker(writer, model, dataloader, tracking_tool=config.tracking_tool)
+
+    # Final writing and checkpointing
+    if config.folder:
+        write_checkpoint(config.folder, model, optimizer, iteration)
+    write_tracker(writer, loss, metrics, iteration, tracking_tool=config.tracking_tool)
+
+    # close tracker
+    if config.tracking_tool == ExperimentTrackingTool.TENSORBOARD:
+        writer.close()
+    elif config.tracking_tool == ExperimentTrackingTool.MLFLOW:
+        writer.end_run()
+
+    return model, optimizer
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ DictDataLoader(dataloaders) + + + dataclass + + +

+ + +
+ + +

This class only holds a dictionary of DataLoaders and samples from them.

+ + + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ InfiniteTensorDataset(*tensors) + +

+ + +
+

+ Bases: IterableDataset

+ + +

Randomly sample points from the first dimension of the given tensors.

+

Behaves like a normal torch Dataset just that we can sample from it as +many times as we want.

+

Examples: +

import torch
+from qadence.ml_tools.data import InfiniteTensorDataset
+
+x_data, y_data = torch.rand(5,2), torch.ones(5,1)
+# The dataset accepts any number of tensors with the same batch dimension
+ds = InfiniteTensorDataset(x_data, y_data)
+
+# call `next` to get one sample from each tensor:
+xs = next(iter(ds))
+
+
(tensor([0.2803, 0.6621]), tensor([1.]))
+

+ +
+ Source code in qadence/ml_tools/data.py +
28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
def __init__(self, *tensors: Tensor):
+    """Randomly sample points from the first dimension of the given tensors.
+
+    Behaves like a normal torch `Dataset` just that we can sample from it as
+    many times as we want.
+
+    Examples:
+    ```python exec="on" source="above" result="json"
+    import torch
+    from qadence.ml_tools.data import InfiniteTensorDataset
+
+    x_data, y_data = torch.rand(5,2), torch.ones(5,1)
+    # The dataset accepts any number of tensors with the same batch dimension
+    ds = InfiniteTensorDataset(x_data, y_data)
+
+    # call `next` to get one sample from each tensor:
+    xs = next(iter(ds))
+    print(str(xs)) # markdown-exec: hide
+    ```
+    """
+    self.tensors = tensors
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + +
+ + +

+ data_to_device(xs, *args, **kwargs) + +

+ + +
+ +

Utility method to move arbitrary data to 'device'.

+ +
+ Source code in qadence/ml_tools/data.py +
84
+85
+86
+87
@singledispatch
+def data_to_device(xs: Any, *args: Any, **kwargs: Any) -> Any:
+    """Utility method to move arbitrary data to 'device'."""
+    raise ValueError(f"Unable to move {type(xs)} with input args: {args} and kwargs: {kwargs}.")
+
+
+
+ +
+ +
+ + +

+ to_dataloader(*tensors, batch_size=1, infinite=False) + +

+ + +
+ +

Convert torch tensors an (infinite) Dataloader.

+ + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
*tensors +
+

Torch tensors to use in the dataloader.

+
+

+ + TYPE: + Tensor + + + DEFAULT: + () + +

+
batch_size +
+

batch size of sampled tensors

+
+

+ + TYPE: + int + + + DEFAULT: + 1 + +

+
infinite +
+

if True, the dataloader will keep sampling indefinitely even after the whole +dataset was sampled once

+
+

+ + TYPE: + bool + + + DEFAULT: + False + +

+
+

Examples:

+
import torch
+from qadence.ml_tools import to_dataloader
+
+(x, y, z) = [torch.rand(10) for _ in range(3)]
+loader = iter(to_dataloader(x, y, z, batch_size=5, infinite=True))
+print(next(loader))
+print(next(loader))
+print(next(loader))
+
+
[tensor([0.5918, 0.4250, 0.3757, 0.9884, 0.8143]), tensor([0.4384, 0.9686, 0.9132, 0.3721, 0.3133]), tensor([0.3102, 0.1979, 0.5804, 0.9381, 0.6248])]
+[tensor([0.9080, 0.2054, 0.6696, 0.8955, 0.0278]), tensor([0.4845, 0.8910, 0.6927, 0.5117, 0.7474]), tensor([0.0204, 0.6286, 0.5215, 0.6765, 0.2913])]
+[tensor([0.5918, 0.4250, 0.3757, 0.9884, 0.8143]), tensor([0.4384, 0.9686, 0.9132, 0.3721, 0.3133]), tensor([0.3102, 0.1979, 0.5804, 0.9381, 0.6248])]
+
+ +
+ Source code in qadence/ml_tools/data.py +
58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
def to_dataloader(*tensors: Tensor, batch_size: int = 1, infinite: bool = False) -> DataLoader:
+    """Convert torch tensors an (infinite) Dataloader.
+
+    Arguments:
+        *tensors: Torch tensors to use in the dataloader.
+        batch_size: batch size of sampled tensors
+        infinite: if `True`, the dataloader will keep sampling indefinitely even after the whole
+            dataset was sampled once
+
+    Examples:
+
+    ```python exec="on" source="above" result="json"
+    import torch
+    from qadence.ml_tools import to_dataloader
+
+    (x, y, z) = [torch.rand(10) for _ in range(3)]
+    loader = iter(to_dataloader(x, y, z, batch_size=5, infinite=True))
+    print(next(loader))
+    print(next(loader))
+    print(next(loader))
+    ```
+    """
+    ds = InfiniteTensorDataset(*tensors) if infinite else TensorDataset(*tensors)
+    return DataLoader(ds, batch_size=batch_size)
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ QNN(circuit, observable, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD, measurement=None, noise=None, configuration=None, inputs=None, input_diff_mode=InputDiffMode.AD) + +

+ + +
+

+ Bases: QuantumModel

+ + +

Quantum neural network model for n-dimensional inputs.

+

Examples: +

import torch
+from qadence import QuantumCircuit, QNN, Z
+from qadence import hea, feature_map, hamiltonian_factory, kron
+
+# create the circuit
+n_qubits, depth = 2, 4
+fm = kron(
+    feature_map(1, support=(0,), param="x"),
+    feature_map(1, support=(1,), param="y")
+)
+ansatz = hea(n_qubits=n_qubits, depth=depth)
+circuit = QuantumCircuit(n_qubits, fm, ansatz)
+obs_base = hamiltonian_factory(n_qubits, detuning=Z)
+
+# the QNN will yield two outputs
+obs = [2.0 * obs_base, 4.0 * obs_base]
+
+# initialize and use the model
+qnn = QNN(circuit, obs, inputs=["x", "y"])
+y = qnn(torch.rand(3, 2))
+
+
+ +
tensor([[1.2132, 2.4264],
+        [0.7613, 1.5226],
+        [1.2638, 2.5276]], grad_fn=<CatBackward0>)
+
+ +

+ +

Initialize the QNN.

+

The number of inputs is determined by the feature parameters in the input +quantum circuit while the number of outputs is determined by how many +observables are provided as input

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
circuit +
+

The quantum circuit to use for the QNN.

+
+

+ + TYPE: + QuantumCircuit + +

+
observable +
+

The observable.

+
+

+ + TYPE: + list[AbstractBlock] | AbstractBlock + +

+
backend +
+

The chosen quantum backend.

+
+

+ + TYPE: + BackendName + + + DEFAULT: + PYQTORCH + +

+
diff_mode +
+

The differentiation engine to use. Choices 'gpsr' or 'ad'.

+
+

+ + TYPE: + DiffMode + + + DEFAULT: + AD + +

+
measurement +
+

optional measurement protocol. If None, +use exact expectation value with a statevector simulator

+
+

+ + TYPE: + Measurements | None + + + DEFAULT: + None + +

+
noise +
+

A noise model to use.

+
+

+ + TYPE: + Noise | None + + + DEFAULT: + None + +

+
configuration +
+

optional configuration for the backend

+
+

+ + TYPE: + BackendConfiguration | dict | None + + + DEFAULT: + None + +

+
inputs +
+

List that indicates the order of variables of the tensors that are passed +to the model. Given input tensors xs = torch.rand(batch_size, input_size:=2) a QNN +with inputs=["t", "x"] will assign t, x = xs[:,0], xs[:,1].

+
+

+ + TYPE: + list[Basic | str] | None + + + DEFAULT: + None + +

+
input_diff_mode +
+

The differentiation mode for the input tensor.

+
+

+ + TYPE: + InputDiffMode | str + + + DEFAULT: + AD + +

+
+ +
+ Source code in qadence/ml_tools/models.py +
def __init__(
+    self,
+    circuit: QuantumCircuit,
+    observable: list[AbstractBlock] | AbstractBlock,
+    backend: BackendName = BackendName.PYQTORCH,
+    diff_mode: DiffMode = DiffMode.AD,
+    measurement: Measurements | None = None,
+    noise: Noise | None = None,
+    configuration: BackendConfiguration | dict | None = None,
+    inputs: list[sympy.Basic | str] | None = None,
+    input_diff_mode: InputDiffMode | str = InputDiffMode.AD,
+):
+    """Initialize the QNN.
+
+    The number of inputs is determined by the feature parameters in the input
+    quantum circuit while the number of outputs is determined by how many
+    observables are provided as input
+
+    Args:
+        circuit: The quantum circuit to use for the QNN.
+        observable: The observable.
+        backend: The chosen quantum backend.
+        diff_mode: The differentiation engine to use. Choices 'gpsr' or 'ad'.
+        measurement: optional measurement protocol. If None,
+            use exact expectation value with a statevector simulator
+        noise: A noise model to use.
+        configuration: optional configuration for the backend
+        inputs: List that indicates the order of variables of the tensors that are passed
+            to the model. Given input tensors `xs = torch.rand(batch_size, input_size:=2)` a QNN
+            with `inputs=["t", "x"]` will assign `t, x = xs[:,0], xs[:,1]`.
+        input_diff_mode: The differentiation mode for the input tensor.
+    """
+    super().__init__(
+        circuit,
+        observable=observable,
+        backend=backend,
+        diff_mode=diff_mode,
+        measurement=measurement,
+        configuration=configuration,
+        noise=noise,
+    )
+    if self._observable is None:
+        raise ValueError("You need to provide at least one observable in the QNN constructor")
+    if (inputs is not None) and (len(self.inputs) == len(inputs)):
+        self.inputs = [sympy.symbols(x) if isinstance(x, str) else x for x in inputs]  # type: ignore[union-attr]
+    elif (inputs is None) and len(self.inputs) <= 1:
+        self.inputs = [sympy.symbols(x) if isinstance(x, str) else x for x in self.inputs]  # type: ignore[union-attr]
+    else:
+        raise ValueError(
+            """
+            Your QNN has more than one input. Please provide a list of inputs in the order of
+            your tensor domain. For example, if you want to pass
+            `xs = torch.rand(batch_size, input_size:=3)` to you QNN, where
+            ```
+            t = x[:,0]
+            x = x[:,1]
+            y = x[:,2]
+            ```
+            you have to specify
+            ```
+            QNN(circuit, observable, inputs=["t", "x", "y"])
+            ```
+            You can also pass a list of sympy symbols.
+        """
+        )
+    self.format_to_dict = format_to_dict_fn(self.inputs)  # type: ignore[arg-type]
+    self.input_diff_mode = InputDiffMode(input_diff_mode)
+    if self.input_diff_mode == InputDiffMode.FD:
+        from qadence.backends.utils import finitediff
+
+        self.__derivative = finitediff
+    elif self.input_diff_mode == InputDiffMode.AD:
+        self.__derivative = _torch_derivative  # type: ignore[assignment]
+    else:
+        raise ValueError(f"Unkown forward diff mode: {self.input_diff_mode}")
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ forward(values=None, state=None, measurement=None, noise=None, endianness=Endianness.BIG) + +

+ + +
+ +

Forward pass of the model.

+

This returns the (differentiable) expectation value of the given observable +operator defined in the constructor. Differently from the base QuantumModel +class, the QNN accepts also a tensor as input for the forward pass. The +tensor is expected to have shape: n_batches x in_features where n_batches +is the number of data points and in_features is the dimensionality of the problem

+

The output of the forward pass is the expectation value of the input +observable(s). If a single observable is given, the output shape is +n_batches while if multiple observables are given the output shape +is instead n_batches x n_observables

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
values +
+

the values of the feature parameters

+
+

+ + TYPE: + dict[str, Tensor] | Tensor + + + DEFAULT: + None + +

+
state +
+

Initial state.

+
+

+ + TYPE: + Tensor | None + + + DEFAULT: + None + +

+
measurement +
+

optional measurement protocol. If None, +use exact expectation value with a statevector simulator

+
+

+ + TYPE: + Measurements | None + + + DEFAULT: + None + +

+
noise +
+

A noise model to use.

+
+

+ + TYPE: + Noise | None + + + DEFAULT: + None + +

+
endianness +
+

Endianness of the resulting bit strings.

+
+

+ + TYPE: + Endianness + + + DEFAULT: + BIG + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ Tensor + +
+

a tensor with the expectation value of the observables passed +in the constructor of the model

+
+

+ + TYPE: + Tensor + +

+
+ +
+ Source code in qadence/ml_tools/models.py +
def forward(
+    self,
+    values: dict[str, Tensor] | Tensor = None,
+    state: Tensor | None = None,
+    measurement: Measurements | None = None,
+    noise: Noise | None = None,
+    endianness: Endianness = Endianness.BIG,
+) -> Tensor:
+    """Forward pass of the model.
+
+    This returns the (differentiable) expectation value of the given observable
+    operator defined in the constructor. Differently from the base QuantumModel
+    class, the QNN accepts also a tensor as input for the forward pass. The
+    tensor is expected to have shape: `n_batches x in_features` where `n_batches`
+    is the number of data points and `in_features` is the dimensionality of the problem
+
+    The output of the forward pass is the expectation value of the input
+    observable(s). If a single observable is given, the output shape is
+    `n_batches` while if multiple observables are given the output shape
+    is instead `n_batches x n_observables`
+
+    Args:
+        values: the values of the feature parameters
+        state: Initial state.
+        measurement: optional measurement protocol. If None,
+            use exact expectation value with a statevector simulator
+        noise: A noise model to use.
+        endianness: Endianness of the resulting bit strings.
+
+    Returns:
+        Tensor: a tensor with the expectation value of the observables passed
+            in the constructor of the model
+    """
+    return self.expectation(
+        values, state=state, measurement=measurement, noise=noise, endianness=endianness
+    )
+
+
+
+ +
+ +
+ + +

+ from_configs(register, obs_config, fm_config=FeatureMapConfig(), ansatz_config=AnsatzConfig(), backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD, measurement=None, noise=None, configuration=None, input_diff_mode=InputDiffMode.AD) + + + classmethod + + +

+ + +
+ +

Create a QNN from a set of configurations.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
register +
+

The number of qubits or a register object.

+
+

+ + TYPE: + int | Register + +

+
obs_config +
+

The configuration(s) +for the observable(s).

+
+

+ + TYPE: + list[ObservableConfig] | ObservableConfig + +

+
fm_config +
+

The configuration for the feature map. +Defaults to no feature encoding block.

+
+

+ + TYPE: + FeatureMapConfig + + + DEFAULT: + FeatureMapConfig() + +

+
ansatz_config +
+

The configuration for the ansatz. +Defaults to a single layer of hardware efficient ansatz.

+
+

+ + TYPE: + AnsatzConfig + + + DEFAULT: + AnsatzConfig() + +

+
backend +
+

The chosen quantum backend.

+
+

+ + TYPE: + BackendName + + + DEFAULT: + PYQTORCH + +

+
diff_mode +
+

The differentiation engine to use. Choices are +'gpsr' or 'ad'.

+
+

+ + TYPE: + DiffMode + + + DEFAULT: + AD + +

+
measurement +
+

Optional measurement protocol. If None, +use exact expectation value with a statevector simulator.

+
+

+ + TYPE: + Measurements + + + DEFAULT: + None + +

+
noise +
+

A noise model to use.

+
+

+ + TYPE: + Noise + + + DEFAULT: + None + +

+
configuration +
+

Optional backend configuration.

+
+

+ + TYPE: + BackendConfiguration | dict + + + DEFAULT: + None + +

+
input_diff_mode +
+

The differentiation mode for the input tensor.

+
+

+ + TYPE: + InputDiffMode + + + DEFAULT: + AD + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + QNN + + +
+

A QNN object.

+
+
+ + + + + + + + + + + + + + + +
RAISESDESCRIPTION
+ + ValueError + + +
+

If the observable configuration is not provided.

+
+
+

Example: +

import torch
+from qadence.ml_tools.config import AnsatzConfig, FeatureMapConfig
+from qadence.ml_tools import QNN
+from qadence.constructors import ObservableConfig
+from qadence.operations import Z
+from qadence.types import (
+    AnsatzType, BackendName, BasisSet, ObservableTransform, ReuploadScaling, Strategy
+)
+
+register = 4
+obs_config = ObservableConfig(
+    detuning=Z,
+    scale=5.0,
+    shift=0.0,
+    transformation_type=ObservableTransform.SCALE,
+    trainable_transform=None,
+)
+fm_config = FeatureMapConfig(
+    num_features=2,
+    inputs=["x", "y"],
+    basis_set=BasisSet.FOURIER,
+    reupload_scaling=ReuploadScaling.CONSTANT,
+    feature_range={
+        "x": (-1.0, 1.0),
+        "y": (0.0, 1.0),
+    },
+)
+ansatz_config = AnsatzConfig(
+    depth=2,
+    ansatz_type=AnsatzType.HEA,
+    ansatz_strategy=Strategy.DIGITAL,
+)
+
+qnn = QNN.from_configs(
+    register, obs_config, fm_config, ansatz_config, backend=BackendName.PYQTORCH
+)
+
+x = torch.rand(2, 2)
+y = qnn(x)
+
+
+ +
tensor([[5.8656],
+        [6.6375]], grad_fn=<CatBackward0>)
+
+ +

+ +
+ Source code in qadence/ml_tools/models.py +
@classmethod
+def from_configs(
+    cls,
+    register: int | Register,
+    obs_config: Any,
+    fm_config: Any = FeatureMapConfig(),
+    ansatz_config: Any = AnsatzConfig(),
+    backend: BackendName = BackendName.PYQTORCH,
+    diff_mode: DiffMode = DiffMode.AD,
+    measurement: Measurements | None = None,
+    noise: Noise | None = None,
+    configuration: BackendConfiguration | dict | None = None,
+    input_diff_mode: InputDiffMode | str = InputDiffMode.AD,
+) -> QNN:
+    """Create a QNN from a set of configurations.
+
+    Args:
+        register (int | Register): The number of qubits or a register object.
+        obs_config (list[ObservableConfig] | ObservableConfig): The configuration(s)
+            for the observable(s).
+        fm_config (FeatureMapConfig): The configuration for the feature map.
+            Defaults to no feature encoding block.
+        ansatz_config (AnsatzConfig): The configuration for the ansatz.
+            Defaults to a single layer of hardware efficient ansatz.
+        backend (BackendName): The chosen quantum backend.
+        diff_mode (DiffMode): The differentiation engine to use. Choices are
+            'gpsr' or 'ad'.
+        measurement (Measurements): Optional measurement protocol. If None,
+            use exact expectation value with a statevector simulator.
+        noise (Noise): A noise model to use.
+        configuration (BackendConfiguration | dict): Optional backend configuration.
+        input_diff_mode (InputDiffMode): The differentiation mode for the input tensor.
+
+    Returns:
+        A QNN object.
+
+    Raises:
+        ValueError: If the observable configuration is not provided.
+
+    Example:
+    ```python exec="on" source="material-block" result="json"
+    import torch
+    from qadence.ml_tools.config import AnsatzConfig, FeatureMapConfig
+    from qadence.ml_tools import QNN
+    from qadence.constructors import ObservableConfig
+    from qadence.operations import Z
+    from qadence.types import (
+        AnsatzType, BackendName, BasisSet, ObservableTransform, ReuploadScaling, Strategy
+    )
+
+    register = 4
+    obs_config = ObservableConfig(
+        detuning=Z,
+        scale=5.0,
+        shift=0.0,
+        transformation_type=ObservableTransform.SCALE,
+        trainable_transform=None,
+    )
+    fm_config = FeatureMapConfig(
+        num_features=2,
+        inputs=["x", "y"],
+        basis_set=BasisSet.FOURIER,
+        reupload_scaling=ReuploadScaling.CONSTANT,
+        feature_range={
+            "x": (-1.0, 1.0),
+            "y": (0.0, 1.0),
+        },
+    )
+    ansatz_config = AnsatzConfig(
+        depth=2,
+        ansatz_type=AnsatzType.HEA,
+        ansatz_strategy=Strategy.DIGITAL,
+    )
+
+    qnn = QNN.from_configs(
+        register, obs_config, fm_config, ansatz_config, backend=BackendName.PYQTORCH
+    )
+
+    x = torch.rand(2, 2)
+    y = qnn(x)
+    print(str(y)) # markdown-exec: hide
+    ```
+    """
+    from .constructors import build_qnn_from_configs
+
+    return build_qnn_from_configs(
+        register=register,
+        observable_config=obs_config,
+        fm_config=fm_config,
+        ansatz_config=ansatz_config,
+        backend=backend,
+        diff_mode=diff_mode,
+        measurement=measurement,
+        noise=noise,
+        configuration=configuration,
+        input_diff_mode=input_diff_mode,
+    )
+
+
+
+ +
+ + + +
+ +
+ +
+ + +
+ + +

+ derivative(ufa, x, derivative_indices) + +

+ + +
+ +

Compute derivatives w.r.t.

+

inputs of a UFA with a single output. The +derivative_indices specify which derivative(s) are computed. E.g. +derivative_indices=(1,2) would compute the a second order derivative w.r.t +to the indices 1 and 2 of the input tensor.

+ + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
ufa +
+

The model for which we want to compute the derivative.

+
+

+ + TYPE: + Module + +

+
x +
+

(batch_size, input_size) input tensor.

+
+

+ + TYPE: + Tensor + +

+
derivative_indices +
+

Define which derivatives to compute.

+
+

+ + TYPE: + tuple + +

+
+

Examples: +If we create a UFA with three inputs and denote the first, second, and third +input with x, y, and z we can compute the following derivatives w.r.t +to those inputs: +

import torch
+from qadence.ml_tools.models import derivative, QNN
+from qadence.ml_tools.config import FeatureMapConfig, AnsatzConfig
+from qadence.constructors.hamiltonians import ObservableConfig
+from qadence.operations import Z
+
+fm_config = FeatureMapConfig(num_features=3, inputs=["x", "y", "z"])
+ansatz_config = AnsatzConfig()
+obs_config = ObservableConfig(detuning=Z)
+
+f = QNN.from_configs(
+    register=3, obs_config=obs_config, fm_config=fm_config, ansatz_config=ansatz_config,
+)
+inputs = torch.rand(5,3,requires_grad=True)
+
+# df_dx
+derivative(f, inputs, (0,))
+
+# d2f_dydz
+derivative(f, inputs, (1,2))
+
+# d3fdy2dx
+derivative(f, inputs, (1,1,0))
+
+
+ + + +

+ +
+ Source code in qadence/ml_tools/models.py +
36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
def derivative(ufa: torch.nn.Module, x: Tensor, derivative_indices: tuple[int, ...]) -> Tensor:
+    """Compute derivatives w.r.t.
+
+    inputs of a UFA with a single output. The
+    `derivative_indices` specify which derivative(s) are computed.  E.g.
+    `derivative_indices=(1,2)` would compute the a second order derivative w.r.t
+    to the indices `1` and `2` of the input tensor.
+
+    Arguments:
+        ufa: The model for which we want to compute the derivative.
+        x (Tensor): (batch_size, input_size) input tensor.
+        derivative_indices (tuple): Define which derivatives to compute.
+
+    Examples:
+    If we create a UFA with three inputs and denote the first, second, and third
+    input with `x`, `y`, and `z` we can compute the following derivatives w.r.t
+    to those inputs:
+    ```py exec="on" source="material-block"
+    import torch
+    from qadence.ml_tools.models import derivative, QNN
+    from qadence.ml_tools.config import FeatureMapConfig, AnsatzConfig
+    from qadence.constructors.hamiltonians import ObservableConfig
+    from qadence.operations import Z
+
+    fm_config = FeatureMapConfig(num_features=3, inputs=["x", "y", "z"])
+    ansatz_config = AnsatzConfig()
+    obs_config = ObservableConfig(detuning=Z)
+
+    f = QNN.from_configs(
+        register=3, obs_config=obs_config, fm_config=fm_config, ansatz_config=ansatz_config,
+    )
+    inputs = torch.rand(5,3,requires_grad=True)
+
+    # df_dx
+    derivative(f, inputs, (0,))
+
+    # d2f_dydz
+    derivative(f, inputs, (1,2))
+
+    # d3fdy2dx
+    derivative(f, inputs, (1,1,0))
+    ```
+    """
+    assert ufa.out_features == 1, "Can only call `derivative` on models with 1D output."
+    return ufa._derivative(x, derivative_indices)
+
+
+
+ +
+ +
+ + +

+ format_to_dict_fn(inputs=[]) + +

+ + +
+ +

Format an input tensor into the format required by the forward pass.

+

The tensor is assumed to have dimensions: n_batches x in_features where in_features +corresponds to the number of input features of the QNN

+ +
+ Source code in qadence/ml_tools/models.py +
def format_to_dict_fn(
+    inputs: list[sympy.Symbol | str] = [],
+) -> Callable[[Tensor | ParamDictType], ParamDictType]:
+    """Format an input tensor into the format required by the forward pass.
+
+    The tensor is assumed to have dimensions: n_batches x in_features where in_features
+    corresponds to the number of input features of the QNN
+    """
+    in_features = len(inputs)
+
+    def tensor_to_dict(values: Tensor | ParamDictType) -> ParamDictType:
+        if isinstance(values, Tensor):
+            values = values.reshape(-1, 1) if len(values.size()) == 1 else values
+            if not values.shape[1] == in_features:
+                raise ValueError(
+                    f"Model expects in_features={in_features} but got {values.shape[1]}."
+                )
+            values = {fparam.name: values[:, inputs.index(fparam)] for fparam in inputs}  # type: ignore[union-attr]
+        return values
+
+    return tensor_to_dict
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v1.7.4/api/models/index.html b/v1.7.4/api/models/index.html new file mode 100644 index 000000000..9ab09f494 --- /dev/null +++ b/v1.7.4/api/models/index.html @@ -0,0 +1,6142 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Quantum models - Qadence + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+ +
+ + + +
+
+ + + + + + + +

Quantum models

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ QuantumModel(circuit, observable=None, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD, measurement=None, noise=None, mitigation=None, configuration=None) + +

+ + +
+

+ Bases: Module

+ + +

The central class of qadence that executes QuantumCircuits and make them differentiable.

+

This class should be used as base class for any new quantum model supported in the qadence +framework for information on the implementation of custom models see +here.

+

Example: +

import torch
+from qadence import QuantumModel, QuantumCircuit, RX, RY, Z, PI, chain, kron
+from qadence import FeatureParameter, VariationalParameter
+
+theta = VariationalParameter("theta")
+phi = FeatureParameter("phi")
+
+block = chain(
+    kron(RX(0, theta), RY(1, theta)),
+    kron(RX(0, phi), RY(1, phi)),
+)
+
+circuit = QuantumCircuit(2, block)
+
+observable = Z(0) + Z(1)
+
+model = QuantumModel(circuit, observable)
+values = {"phi": torch.tensor([PI, PI/2]), "theta": torch.tensor([PI, PI/2])}
+
+wf = model.run(values)
+xs = model.sample(values, n_shots=100)
+ex = model.expectation(values)
+print(wf)
+print(xs)
+print(ex)
+
+
+ +
tensor([[ 1.0000e+00+0.0000e+00j, -1.2246e-16+0.0000e+00j,
+          0.0000e+00+1.2246e-16j,  0.0000e+00-1.4998e-32j],
+        [ 4.9304e-32+0.0000e+00j,  2.2204e-16+0.0000e+00j,
+          0.0000e+00-2.2204e-16j,  0.0000e+00-1.0000e+00j]])
+[Counter({'00': 100}), Counter({'11': 100})]
+tensor([[ 2.],
+        [-2.]], requires_grad=True)
+
+ +
+```

+ +

Initialize a generic QuantumModel instance.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
circuit +
+

The circuit that is executed.

+
+

+ + TYPE: + QuantumCircuit + +

+
observable +
+

Optional observable(s) that are used only in the expectation method. You +can also provide observables on the fly to the expectation call directly.

+
+

+ + TYPE: + list[AbstractBlock] | AbstractBlock | None + + + DEFAULT: + None + +

+
backend +
+

A backend for circuit execution.

+
+

+ + TYPE: + BackendName | str + + + DEFAULT: + PYQTORCH + +

+
diff_mode +
+

A differentiability mode. Parameter shift based modes work on all backends. +AD based modes only on PyTorch based backends.

+
+

+ + TYPE: + DiffMode + + + DEFAULT: + AD + +

+
measurement +
+

Optional measurement protocol. If None, use +exact expectation value with a statevector simulator.

+
+

+ + TYPE: + Measurements | None + + + DEFAULT: + None + +

+
configuration +
+

Configuration for the backend.

+
+

+ + TYPE: + BackendConfiguration | dict | None + + + DEFAULT: + None + +

+
noise +
+

A noise model to use.

+
+

+ + TYPE: + Noise | None + + + DEFAULT: + None + +

+
+ + + + + + + + + + + + + + + +
RAISESDESCRIPTION
+ + ValueError + + +
+

if the diff_mode argument is set to None

+
+
+ +
+ Source code in qadence/model.py +
def __init__(
+    self,
+    circuit: QuantumCircuit,
+    observable: list[AbstractBlock] | AbstractBlock | None = None,
+    backend: BackendName | str = BackendName.PYQTORCH,
+    diff_mode: DiffMode = DiffMode.AD,
+    measurement: Measurements | None = None,
+    noise: Noise | None = None,
+    mitigation: Mitigations | None = None,
+    configuration: BackendConfiguration | dict | None = None,
+):
+    """Initialize a generic QuantumModel instance.
+
+    Arguments:
+        circuit: The circuit that is executed.
+        observable: Optional observable(s) that are used only in the `expectation` method. You
+            can also provide observables on the fly to the expectation call directly.
+        backend: A backend for circuit execution.
+        diff_mode: A differentiability mode. Parameter shift based modes work on all backends.
+            AD based modes only on PyTorch based backends.
+        measurement: Optional measurement protocol. If None, use
+            exact expectation value with a statevector simulator.
+        configuration: Configuration for the backend.
+        noise: A noise model to use.
+
+    Raises:
+        ValueError: if the `diff_mode` argument is set to None
+    """
+    super().__init__()
+
+    if not isinstance(circuit, QuantumCircuit):
+        TypeError(
+            f"The circuit should be of type '<class QuantumCircuit>'. Got {type(circuit)}."
+        )
+
+    if diff_mode is None:
+        raise ValueError("`diff_mode` cannot be `None` in a `QuantumModel`.")
+
+    self.backend = backend_factory(
+        backend=backend, diff_mode=diff_mode, configuration=configuration
+    )
+
+    if isinstance(observable, list) or observable is None:
+        observable = observable
+    else:
+        observable = [observable]
+
+    def _is_feature_param(p: Parameter) -> bool:
+        return not p.trainable and not p.is_number
+
+    if observable is None:
+        self.inputs = list(filter(_is_feature_param, circuit.unique_parameters))
+    else:
+        uparams = unique_parameters(chain(circuit.block, *observable))
+        self.inputs = list(filter(_is_feature_param, uparams))
+
+    conv = self.backend.convert(circuit, observable)
+    self.embedding_fn = conv.embedding_fn
+    self._circuit = conv.circuit
+    self._observable = conv.observable
+    self._backend_name = backend
+    self._diff_mode = diff_mode
+    self._measurement = measurement
+    self._noise = noise
+    self._mitigation = mitigation
+    self._params = nn.ParameterDict(
+        {
+            str(key): nn.Parameter(val, requires_grad=val.requires_grad)
+            for key, val in conv.params.items()
+        }
+    )
+
+
+ + + +
+ + + + + + + +
+ + + +

+ device: torch.device + + + property + + +

+ + +
+ +

Get device.

+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + device + + +
+

torch.device

+
+
+
+ +
+ +
+ + + +

+ in_features: int + + + property + + +

+ + +
+ +

Number of inputs.

+
+ +
+ +
+ + + +

+ num_vparams: int + + + property + + +

+ + +
+ +

The number of variational parameters.

+
+ +
+ +
+ + + +

+ out_features: int | None + + + property + + +

+ + +
+ +

Number of outputs.

+
+ +
+ +
+ + + +

+ vals_vparams: Tensor + + + property + + +

+ + +
+ +

Dictionary with parameters which are actually updated during optimization.

+
+ +
+ +
+ + + +

+ vparams: OrderedDict + + + property + + +

+ + +
+ +

Variational parameters.

+
+ +
+ + + +
+ + +

+ assign_parameters(values) + +

+ + +
+ +

Return the final, assigned circuit that is used in e.g. backend.run.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
values +
+

Values dict which contains values for the parameters.

+
+

+ + TYPE: + dict[str, Tensor] + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + Any + + +
+

Final, assigned circuit that is used in e.g. backend.run

+
+
+ +
+ Source code in qadence/model.py +
def assign_parameters(self, values: dict[str, Tensor]) -> Any:
+    """Return the final, assigned circuit that is used in e.g. `backend.run`.
+
+    Arguments:
+        values: Values dict which contains values for the parameters.
+
+    Returns:
+        Final, assigned circuit that is used in e.g. `backend.run`
+    """
+    params = self.embedding_fn(self._params, values)
+    return self.backend.assign_parameters(self._circuit, params)
+
+
+
+ +
+ +
+ + +

+ circuit(circuit) + +

+ + +
+ +

Get backend-converted circuit.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
circuit +
+

QuantumCircuit instance.

+
+

+ + TYPE: + QuantumCircuit + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + ConvertedCircuit + + +
+

Backend circuit.

+
+
+ +
+ Source code in qadence/model.py +
def circuit(self, circuit: QuantumCircuit) -> ConvertedCircuit:
+    """Get backend-converted circuit.
+
+    Args:
+        circuit: QuantumCircuit instance.
+
+    Returns:
+        Backend circuit.
+    """
+    return self.backend.circuit(circuit)
+
+
+
+ +
+ +
+ + +

+ expectation(values={}, observable=None, state=None, measurement=None, noise=None, mitigation=None, endianness=Endianness.BIG) + +

+ + +
+ +

Compute expectation using the given backend.

+

Given an input state \(|\psi_0 \rangle\), +a set of variational parameters \(\vec{\theta}\) +and the unitary representation of the model \(U(\vec{\theta})\) +we return \(\langle \psi_0 | U(\vec{\theta}) | \psi_0 \rangle\).

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
values +
+

Values dict which contains values for the parameters.

+
+

+ + TYPE: + dict[str, Tensor] + + + DEFAULT: + {} + +

+
observable +
+

Observable part of the expectation.

+
+

+ + TYPE: + list[ConvertedObservable] | ConvertedObservable | None + + + DEFAULT: + None + +

+
state +
+

Optional input state.

+
+

+ + TYPE: + Optional[Tensor] + + + DEFAULT: + None + +

+
measurement +
+

Optional measurement protocol. If None, use +exact expectation value with a statevector simulator.

+
+

+ + TYPE: + Measurements | None + + + DEFAULT: + None + +

+
noise +
+

A noise model to use.

+
+

+ + TYPE: + Noise | None + + + DEFAULT: + None + +

+
mitigation +
+

A mitigation protocol to use.

+
+

+ + TYPE: + Mitigations | None + + + DEFAULT: + None + +

+
endianness +
+

Storage convention for binary information.

+
+

+ + TYPE: + Endianness + + + DEFAULT: + BIG + +

+
+ + + + + + + + + + + + + + + +
RAISESDESCRIPTION
+ + ValueError + + +
+

when no observable is set.

+
+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + Tensor + + +
+

A torch.Tensor of shape n_batches x n_obs

+
+
+ +
+ Source code in qadence/model.py +
def expectation(
+    self,
+    values: dict[str, Tensor] = {},
+    observable: list[ConvertedObservable] | ConvertedObservable | None = None,
+    state: Optional[Tensor] = None,
+    measurement: Measurements | None = None,
+    noise: Noise | None = None,
+    mitigation: Mitigations | None = None,
+    endianness: Endianness = Endianness.BIG,
+) -> Tensor:
+    r"""Compute expectation using the given backend.
+
+
+
+    Given an input state $|\psi_0 \rangle$,
+    a set of variational parameters $\vec{\theta}$
+    and the unitary representation of the model $U(\vec{\theta})$
+    we return $\langle \psi_0 | U(\vec{\theta}) | \psi_0 \rangle$.
+
+    Arguments:
+        values: Values dict which contains values for the parameters.
+        observable: Observable part of the expectation.
+        state: Optional input state.
+        measurement: Optional measurement protocol. If None, use
+            exact expectation value with a statevector simulator.
+        noise: A noise model to use.
+        mitigation: A mitigation protocol to use.
+        endianness: Storage convention for binary information.
+
+    Raises:
+        ValueError: when no observable is set.
+
+    Returns:
+        A torch.Tensor of shape n_batches x n_obs
+    """
+    if observable is None:
+        if self._observable is None:
+            raise ValueError(
+                "Provide an AbstractBlock as the observable to compute expectation."
+                "Either pass a 'native_observable' directly to 'QuantumModel.expectation'"
+                "or pass a (non-native) '<class AbstractBlock>' to the 'QuantumModel.__init__'."
+            )
+        observable = self._observable
+
+    params = self.embedding_fn(self._params, values)
+    if measurement is None:
+        measurement = self._measurement
+    if noise is None:
+        noise = self._noise
+    else:
+        self._noise = noise
+    if mitigation is None:
+        mitigation = self._mitigation
+    return self.backend.expectation(
+        circuit=self._circuit,
+        observable=observable,
+        param_values=params,
+        state=state,
+        measurement=measurement,
+        noise=noise,
+        mitigation=mitigation,
+        endianness=endianness,
+    )
+
+
+
+ +
+ +
+ + +

+ forward(*args, **kwargs) + +

+ + +
+ +

Calls run method with arguments.

+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ Tensor + +
+

A torch.Tensor representing output.

+
+

+ + TYPE: + Tensor + +

+
+ +
+ Source code in qadence/model.py +
def forward(self, *args: Any, **kwargs: Any) -> Tensor:
+    """Calls run method with arguments.
+
+    Returns:
+        Tensor: A torch.Tensor representing output.
+    """
+    return self.run(*args, **kwargs)
+
+
+
+ +
+ +
+ + +

+ load(file_path, as_torch=False, map_location='cpu') + + + classmethod + + +

+ + +
+ +

Load QuantumModel.

+ + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
file_path +
+

File path to load model from.

+
+

+ + TYPE: + str | Path + +

+
as_torch +
+

Load parameters as torch tensor. Defaults to False.

+
+

+ + TYPE: + bool + + + DEFAULT: + False + +

+
map_location +
+

Location for loading. Defaults to "cpu".

+
+

+ + TYPE: + str | device + + + DEFAULT: + 'cpu' + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + QuantumModel + + +
+

QuantumModel from file_path.

+
+
+ +
+ Source code in qadence/model.py +
@classmethod
+def load(
+    cls, file_path: str | Path, as_torch: bool = False, map_location: str | torch.device = "cpu"
+) -> QuantumModel:
+    """Load QuantumModel.
+
+    Arguments:
+        file_path: File path to load model from.
+        as_torch: Load parameters as torch tensor. Defaults to False.
+        map_location (str | torch.device, optional): Location for loading. Defaults to "cpu".
+
+    Returns:
+        QuantumModel from file_path.
+    """
+    qm_pt = {}
+    if isinstance(file_path, str):
+        file_path = Path(file_path)
+    if os.path.isdir(file_path):
+        from qadence.ml_tools.saveload import get_latest_checkpoint_name
+
+        file_path = file_path / get_latest_checkpoint_name(file_path, "model")
+
+    try:
+        qm_pt = torch.load(file_path, map_location=map_location)
+    except Exception as e:
+        logger.error(f"Unable to load QuantumModel due to {e}")
+    return cls._from_dict(qm_pt, as_torch)
+
+
+
+ +
+ +
+ + +

+ observable(observable, n_qubits) + +

+ + +
+ +

Get backend observable.

+ + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
observable +
+

Observable block.

+
+

+ + TYPE: + AbstractBlock + +

+
n_qubits +
+

Number of qubits

+
+

+ + TYPE: + int + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + Any + + +
+

Backend observable.

+
+
+ +
+ Source code in qadence/model.py +
def observable(self, observable: AbstractBlock, n_qubits: int) -> Any:
+    """Get backend observable.
+
+    Args:
+        observable: Observable block.
+        n_qubits: Number of qubits
+
+    Returns:
+        Backend observable.
+    """
+    return self.backend.observable(observable, n_qubits)
+
+
+
+ +
+ +
+ + +

+ overlap() + +

+ + +
+ +

Overlap of model.

+ + + + + + + + + + + + + + + +
RAISESDESCRIPTION
+ + NotImplementedError + + +
+

The overlap method is not implemented for this model.

+
+
+ +
+ Source code in qadence/model.py +
def overlap(self) -> Tensor:
+    """Overlap of model.
+
+    Raises:
+        NotImplementedError: The overlap method is not implemented for this model.
+    """
+    raise NotImplementedError("The overlap method is not implemented for this model.")
+
+
+
+ +
+ +
+ + +

+ reset_vparams(values) + +

+ + +
+ +

Reset all the variational parameters with a given list of values.

+ +
+ Source code in qadence/model.py +
def reset_vparams(self, values: Sequence) -> None:
+    """Reset all the variational parameters with a given list of values."""
+    current_vparams = OrderedDict({k: v for k, v in self._params.items() if v.requires_grad})
+
+    assert (
+        len(values) == self.num_vparams
+    ), "Pass an iterable with the values of all variational parameters"
+    for i, k in enumerate(current_vparams.keys()):
+        current_vparams[k].data = torch.tensor([values[i]])
+
+
+
+ +
+ +
+ + +

+ run(values=None, state=None, endianness=Endianness.BIG) + +

+ + +
+ +

Run model.

+

Given an input state \(| \psi_0 \rangle\), +a set of variational parameters \(\vec{\theta}\) +and the unitary representation of the model \(U(\vec{\theta})\) +we return \(U(\vec{\theta}) | \psi_0 \rangle\).

+ + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
values +
+

Values dict which contains values for the parameters.

+
+

+ + TYPE: + dict[str, Tensor] + + + DEFAULT: + None + +

+
state +
+

Optional input state to apply model on.

+
+

+ + TYPE: + Tensor | None + + + DEFAULT: + None + +

+
endianness +
+

Storage convention for binary information.

+
+

+ + TYPE: + Endianness + + + DEFAULT: + BIG + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + Tensor + + +
+

A torch.Tensor representing output.

+
+
+ +
+ Source code in qadence/model.py +
def run(
+    self,
+    values: dict[str, Tensor] = None,
+    state: Tensor | None = None,
+    endianness: Endianness = Endianness.BIG,
+) -> Tensor:
+    r"""Run model.
+
+    Given an input state $| \psi_0 \rangle$,
+    a set of variational parameters $\vec{\theta}$
+    and the unitary representation of the model $U(\vec{\theta})$
+    we return $U(\vec{\theta}) | \psi_0 \rangle$.
+
+    Arguments:
+        values: Values dict which contains values for the parameters.
+        state: Optional input state to apply model on.
+        endianness: Storage convention for binary information.
+
+    Returns:
+        A torch.Tensor representing output.
+    """
+    if values is None:
+        values = {}
+
+    params = self.embedding_fn(self._params, values)
+
+    return self.backend.run(self._circuit, params, state=state, endianness=endianness)
+
+
+
+ +
+ +
+ + +

+ sample(values={}, n_shots=1000, state=None, noise=None, mitigation=None, endianness=Endianness.BIG) + +

+ + +
+ +

Obtain samples from model.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
values +
+

Values dict which contains values for the parameters.

+
+

+ + TYPE: + dict[str, Tensor] + + + DEFAULT: + {} + +

+
n_shots +
+

Observable part of the expectation.

+
+

+ + TYPE: + int + + + DEFAULT: + 1000 + +

+
state +
+

Optional input state to apply model on.

+
+

+ + TYPE: + Tensor | None + + + DEFAULT: + None + +

+
noise +
+

A noise model to use.

+
+

+ + TYPE: + Noise | None + + + DEFAULT: + None + +

+
mitigation +
+

A mitigation protocol to use.

+
+

+ + TYPE: + Mitigations | None + + + DEFAULT: + None + +

+
endianness +
+

Storage convention for binary information.

+
+

+ + TYPE: + Endianness + + + DEFAULT: + BIG + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + list[Counter] + + +
+

A list of Counter instances with the sample results.

+
+
+ +
+ Source code in qadence/model.py +
def sample(
+    self,
+    values: dict[str, torch.Tensor] = {},
+    n_shots: int = 1000,
+    state: torch.Tensor | None = None,
+    noise: Noise | None = None,
+    mitigation: Mitigations | None = None,
+    endianness: Endianness = Endianness.BIG,
+) -> list[Counter]:
+    """Obtain samples from model.
+
+    Arguments:
+        values: Values dict which contains values for the parameters.
+        n_shots: Observable part of the expectation.
+        state: Optional input state to apply model on.
+        noise: A noise model to use.
+        mitigation: A mitigation protocol to use.
+        endianness: Storage convention for binary information.
+
+    Returns:
+        A list of Counter instances with the sample results.
+    """
+    params = self.embedding_fn(self._params, values)
+    if noise is None:
+        noise = self._noise
+    if mitigation is None:
+        mitigation = self._mitigation
+    return self.backend.sample(
+        self._circuit,
+        params,
+        n_shots=n_shots,
+        state=state,
+        noise=noise,
+        mitigation=mitigation,
+        endianness=endianness,
+    )
+
+
+
+ +
+ +
+ + +

+ save(folder, file_name='quantum_model.pt', save_params=True) + +

+ + +
+ +

Save model.

+ + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
folder +
+

Folder where model is saved.

+
+

+ + TYPE: + str | Path + +

+
file_name +
+

File name for saving model. Defaults to "quantum_model.pt".

+
+

+ + TYPE: + str + + + DEFAULT: + 'quantum_model.pt' + +

+
save_params +
+

Save parameters if True. Defaults to True.

+
+

+ + TYPE: + bool + + + DEFAULT: + True + +

+
+ + + + + + + + + + + + + + + +
RAISESDESCRIPTION
+ + FileNotFoundError + + +
+

If folder is not a directory.

+
+
+ +
+ Source code in qadence/model.py +
def save(
+    self, folder: str | Path, file_name: str = "quantum_model.pt", save_params: bool = True
+) -> None:
+    """Save model.
+
+    Arguments:
+        folder: Folder where model is saved.
+        file_name: File name for saving model. Defaults to "quantum_model.pt".
+        save_params: Save parameters if True. Defaults to True.
+
+    Raises:
+        FileNotFoundError: If folder is not a directory.
+    """
+    if not os.path.isdir(folder):
+        raise FileNotFoundError
+    try:
+        torch.save(self._to_dict(save_params), folder / Path(file_name))
+    except Exception as e:
+        logger.error(f"Unable to write QuantumModel to disk due to {e}")
+
+
+
+ +
+ +
+ + +

+ to(*args, **kwargs) + +

+ + +
+ +

Conversion method for device or types.

+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + QuantumModel + + +
+

QuantumModel with conversions.

+
+
+ +
+ Source code in qadence/model.py +
def to(self, *args: Any, **kwargs: Any) -> QuantumModel:
+    """Conversion method for device or types.
+
+    Returns:
+        QuantumModel with conversions.
+    """
+    from pyqtorch import QuantumCircuit as PyQCircuit
+
+    try:
+        if isinstance(self._circuit.native, PyQCircuit):
+            self._circuit.native = self._circuit.native.to(*args, **kwargs)
+            if self._observable is not None:
+                if isinstance(self._observable, ConvertedObservable):
+                    self._observable.native = self._observable.native.to(*args, **kwargs)
+                elif isinstance(self._observable, list):
+                    for obs in self._observable:
+                        obs.native = obs.native.to(*args, **kwargs)
+            self._params = self._params.to(
+                device=self._circuit.native.device,
+                dtype=(
+                    torch.float64
+                    if self._circuit.native.dtype == torch.cdouble
+                    else torch.float32
+                ),
+            )
+            logger.debug(f"Moved {self} to {args}, {kwargs}.")
+        else:
+            logger.debug("QuantumModel.to only supports pyqtorch.QuantumCircuits.")
+    except Exception as e:
+        logger.warning(f"Unable to move {self} to {args}, {kwargs} due to {e}.")
+    return self
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ +
+ + + + +
+

+ Bases: QuantumModel

+ + +

Quantum neural network model for n-dimensional inputs.

+

Examples: +

import torch
+from qadence import QuantumCircuit, QNN, Z
+from qadence import hea, feature_map, hamiltonian_factory, kron
+
+# create the circuit
+n_qubits, depth = 2, 4
+fm = kron(
+    feature_map(1, support=(0,), param="x"),
+    feature_map(1, support=(1,), param="y")
+)
+ansatz = hea(n_qubits=n_qubits, depth=depth)
+circuit = QuantumCircuit(n_qubits, fm, ansatz)
+obs_base = hamiltonian_factory(n_qubits, detuning=Z)
+
+# the QNN will yield two outputs
+obs = [2.0 * obs_base, 4.0 * obs_base]
+
+# initialize and use the model
+qnn = QNN(circuit, obs, inputs=["x", "y"])
+y = qnn(torch.rand(3, 2))
+
+
+ +
tensor([[1.1125, 2.2250],
+        [1.6425, 3.2850],
+        [1.0712, 2.1424]], grad_fn=<CatBackward0>)
+
+ +

+ +

Initialize the QNN.

+

The number of inputs is determined by the feature parameters in the input +quantum circuit while the number of outputs is determined by how many +observables are provided as input

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
circuit +
+

The quantum circuit to use for the QNN.

+
+

+ + TYPE: + QuantumCircuit + +

+
observable +
+

The observable.

+
+

+ + TYPE: + list[AbstractBlock] | AbstractBlock + +

+
backend +
+

The chosen quantum backend.

+
+

+ + TYPE: + BackendName + + + DEFAULT: + PYQTORCH + +

+
diff_mode +
+

The differentiation engine to use. Choices 'gpsr' or 'ad'.

+
+

+ + TYPE: + DiffMode + + + DEFAULT: + AD + +

+
measurement +
+

optional measurement protocol. If None, +use exact expectation value with a statevector simulator

+
+

+ + TYPE: + Measurements | None + + + DEFAULT: + None + +

+
noise +
+

A noise model to use.

+
+

+ + TYPE: + Noise | None + + + DEFAULT: + None + +

+
configuration +
+

optional configuration for the backend

+
+

+ + TYPE: + BackendConfiguration | dict | None + + + DEFAULT: + None + +

+
inputs +
+

List that indicates the order of variables of the tensors that are passed +to the model. Given input tensors xs = torch.rand(batch_size, input_size:=2) a QNN +with inputs=["t", "x"] will assign t, x = xs[:,0], xs[:,1].

+
+

+ + TYPE: + list[Basic | str] | None + + + DEFAULT: + None + +

+
input_diff_mode +
+

The differentiation mode for the input tensor.

+
+

+ + TYPE: + InputDiffMode | str + + + DEFAULT: + AD + +

+
+ +
+ Source code in qadence/ml_tools/models.py +
def __init__(
+    self,
+    circuit: QuantumCircuit,
+    observable: list[AbstractBlock] | AbstractBlock,
+    backend: BackendName = BackendName.PYQTORCH,
+    diff_mode: DiffMode = DiffMode.AD,
+    measurement: Measurements | None = None,
+    noise: Noise | None = None,
+    configuration: BackendConfiguration | dict | None = None,
+    inputs: list[sympy.Basic | str] | None = None,
+    input_diff_mode: InputDiffMode | str = InputDiffMode.AD,
+):
+    """Initialize the QNN.
+
+    The number of inputs is determined by the feature parameters in the input
+    quantum circuit while the number of outputs is determined by how many
+    observables are provided as input
+
+    Args:
+        circuit: The quantum circuit to use for the QNN.
+        observable: The observable.
+        backend: The chosen quantum backend.
+        diff_mode: The differentiation engine to use. Choices 'gpsr' or 'ad'.
+        measurement: optional measurement protocol. If None,
+            use exact expectation value with a statevector simulator
+        noise: A noise model to use.
+        configuration: optional configuration for the backend
+        inputs: List that indicates the order of variables of the tensors that are passed
+            to the model. Given input tensors `xs = torch.rand(batch_size, input_size:=2)` a QNN
+            with `inputs=["t", "x"]` will assign `t, x = xs[:,0], xs[:,1]`.
+        input_diff_mode: The differentiation mode for the input tensor.
+    """
+    super().__init__(
+        circuit,
+        observable=observable,
+        backend=backend,
+        diff_mode=diff_mode,
+        measurement=measurement,
+        configuration=configuration,
+        noise=noise,
+    )
+    if self._observable is None:
+        raise ValueError("You need to provide at least one observable in the QNN constructor")
+    if (inputs is not None) and (len(self.inputs) == len(inputs)):
+        self.inputs = [sympy.symbols(x) if isinstance(x, str) else x for x in inputs]  # type: ignore[union-attr]
+    elif (inputs is None) and len(self.inputs) <= 1:
+        self.inputs = [sympy.symbols(x) if isinstance(x, str) else x for x in self.inputs]  # type: ignore[union-attr]
+    else:
+        raise ValueError(
+            """
+            Your QNN has more than one input. Please provide a list of inputs in the order of
+            your tensor domain. For example, if you want to pass
+            `xs = torch.rand(batch_size, input_size:=3)` to you QNN, where
+            ```
+            t = x[:,0]
+            x = x[:,1]
+            y = x[:,2]
+            ```
+            you have to specify
+            ```
+            QNN(circuit, observable, inputs=["t", "x", "y"])
+            ```
+            You can also pass a list of sympy symbols.
+        """
+        )
+    self.format_to_dict = format_to_dict_fn(self.inputs)  # type: ignore[arg-type]
+    self.input_diff_mode = InputDiffMode(input_diff_mode)
+    if self.input_diff_mode == InputDiffMode.FD:
+        from qadence.backends.utils import finitediff
+
+        self.__derivative = finitediff
+    elif self.input_diff_mode == InputDiffMode.AD:
+        self.__derivative = _torch_derivative  # type: ignore[assignment]
+    else:
+        raise ValueError(f"Unkown forward diff mode: {self.input_diff_mode}")
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ forward(values=None, state=None, measurement=None, noise=None, endianness=Endianness.BIG) + +

+ + +
+ +

Forward pass of the model.

+

This returns the (differentiable) expectation value of the given observable +operator defined in the constructor. Differently from the base QuantumModel +class, the QNN accepts also a tensor as input for the forward pass. The +tensor is expected to have shape: n_batches x in_features where n_batches +is the number of data points and in_features is the dimensionality of the problem

+

The output of the forward pass is the expectation value of the input +observable(s). If a single observable is given, the output shape is +n_batches while if multiple observables are given the output shape +is instead n_batches x n_observables

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
values +
+

the values of the feature parameters

+
+

+ + TYPE: + dict[str, Tensor] | Tensor + + + DEFAULT: + None + +

+
state +
+

Initial state.

+
+

+ + TYPE: + Tensor | None + + + DEFAULT: + None + +

+
measurement +
+

optional measurement protocol. If None, +use exact expectation value with a statevector simulator

+
+

+ + TYPE: + Measurements | None + + + DEFAULT: + None + +

+
noise +
+

A noise model to use.

+
+

+ + TYPE: + Noise | None + + + DEFAULT: + None + +

+
endianness +
+

Endianness of the resulting bit strings.

+
+

+ + TYPE: + Endianness + + + DEFAULT: + BIG + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ Tensor + +
+

a tensor with the expectation value of the observables passed +in the constructor of the model

+
+

+ + TYPE: + Tensor + +

+
+ +
+ Source code in qadence/ml_tools/models.py +
def forward(
+    self,
+    values: dict[str, Tensor] | Tensor = None,
+    state: Tensor | None = None,
+    measurement: Measurements | None = None,
+    noise: Noise | None = None,
+    endianness: Endianness = Endianness.BIG,
+) -> Tensor:
+    """Forward pass of the model.
+
+    This returns the (differentiable) expectation value of the given observable
+    operator defined in the constructor. Differently from the base QuantumModel
+    class, the QNN accepts also a tensor as input for the forward pass. The
+    tensor is expected to have shape: `n_batches x in_features` where `n_batches`
+    is the number of data points and `in_features` is the dimensionality of the problem
+
+    The output of the forward pass is the expectation value of the input
+    observable(s). If a single observable is given, the output shape is
+    `n_batches` while if multiple observables are given the output shape
+    is instead `n_batches x n_observables`
+
+    Args:
+        values: the values of the feature parameters
+        state: Initial state.
+        measurement: optional measurement protocol. If None,
+            use exact expectation value with a statevector simulator
+        noise: A noise model to use.
+        endianness: Endianness of the resulting bit strings.
+
+    Returns:
+        Tensor: a tensor with the expectation value of the observables passed
+            in the constructor of the model
+    """
+    return self.expectation(
+        values, state=state, measurement=measurement, noise=noise, endianness=endianness
+    )
+
+
+
+ +
+ +
+ + +

+ from_configs(register, obs_config, fm_config=FeatureMapConfig(), ansatz_config=AnsatzConfig(), backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD, measurement=None, noise=None, configuration=None, input_diff_mode=InputDiffMode.AD) + + + classmethod + + +

+ + +
+ +

Create a QNN from a set of configurations.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
register +
+

The number of qubits or a register object.

+
+

+ + TYPE: + int | Register + +

+
obs_config +
+

The configuration(s) +for the observable(s).

+
+

+ + TYPE: + list[ObservableConfig] | ObservableConfig + +

+
fm_config +
+

The configuration for the feature map. +Defaults to no feature encoding block.

+
+

+ + TYPE: + FeatureMapConfig + + + DEFAULT: + FeatureMapConfig() + +

+
ansatz_config +
+

The configuration for the ansatz. +Defaults to a single layer of hardware efficient ansatz.

+
+

+ + TYPE: + AnsatzConfig + + + DEFAULT: + AnsatzConfig() + +

+
backend +
+

The chosen quantum backend.

+
+

+ + TYPE: + BackendName + + + DEFAULT: + PYQTORCH + +

+
diff_mode +
+

The differentiation engine to use. Choices are +'gpsr' or 'ad'.

+
+

+ + TYPE: + DiffMode + + + DEFAULT: + AD + +

+
measurement +
+

Optional measurement protocol. If None, +use exact expectation value with a statevector simulator.

+
+

+ + TYPE: + Measurements + + + DEFAULT: + None + +

+
noise +
+

A noise model to use.

+
+

+ + TYPE: + Noise + + + DEFAULT: + None + +

+
configuration +
+

Optional backend configuration.

+
+

+ + TYPE: + BackendConfiguration | dict + + + DEFAULT: + None + +

+
input_diff_mode +
+

The differentiation mode for the input tensor.

+
+

+ + TYPE: + InputDiffMode + + + DEFAULT: + AD + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + QNN + + +
+

A QNN object.

+
+
+ + + + + + + + + + + + + + + +
RAISESDESCRIPTION
+ + ValueError + + +
+

If the observable configuration is not provided.

+
+
+

Example: +

import torch
+from qadence.ml_tools.config import AnsatzConfig, FeatureMapConfig
+from qadence.ml_tools import QNN
+from qadence.constructors import ObservableConfig
+from qadence.operations import Z
+from qadence.types import (
+    AnsatzType, BackendName, BasisSet, ObservableTransform, ReuploadScaling, Strategy
+)
+
+register = 4
+obs_config = ObservableConfig(
+    detuning=Z,
+    scale=5.0,
+    shift=0.0,
+    transformation_type=ObservableTransform.SCALE,
+    trainable_transform=None,
+)
+fm_config = FeatureMapConfig(
+    num_features=2,
+    inputs=["x", "y"],
+    basis_set=BasisSet.FOURIER,
+    reupload_scaling=ReuploadScaling.CONSTANT,
+    feature_range={
+        "x": (-1.0, 1.0),
+        "y": (0.0, 1.0),
+    },
+)
+ansatz_config = AnsatzConfig(
+    depth=2,
+    ansatz_type=AnsatzType.HEA,
+    ansatz_strategy=Strategy.DIGITAL,
+)
+
+qnn = QNN.from_configs(
+    register, obs_config, fm_config, ansatz_config, backend=BackendName.PYQTORCH
+)
+
+x = torch.rand(2, 2)
+y = qnn(x)
+
+
+ +
tensor([[2.9797],
+        [0.7951]], grad_fn=<CatBackward0>)
+
+ +

+ +
+ Source code in qadence/ml_tools/models.py +
@classmethod
+def from_configs(
+    cls,
+    register: int | Register,
+    obs_config: Any,
+    fm_config: Any = FeatureMapConfig(),
+    ansatz_config: Any = AnsatzConfig(),
+    backend: BackendName = BackendName.PYQTORCH,
+    diff_mode: DiffMode = DiffMode.AD,
+    measurement: Measurements | None = None,
+    noise: Noise | None = None,
+    configuration: BackendConfiguration | dict | None = None,
+    input_diff_mode: InputDiffMode | str = InputDiffMode.AD,
+) -> QNN:
+    """Create a QNN from a set of configurations.
+
+    Args:
+        register (int | Register): The number of qubits or a register object.
+        obs_config (list[ObservableConfig] | ObservableConfig): The configuration(s)
+            for the observable(s).
+        fm_config (FeatureMapConfig): The configuration for the feature map.
+            Defaults to no feature encoding block.
+        ansatz_config (AnsatzConfig): The configuration for the ansatz.
+            Defaults to a single layer of hardware efficient ansatz.
+        backend (BackendName): The chosen quantum backend.
+        diff_mode (DiffMode): The differentiation engine to use. Choices are
+            'gpsr' or 'ad'.
+        measurement (Measurements): Optional measurement protocol. If None,
+            use exact expectation value with a statevector simulator.
+        noise (Noise): A noise model to use.
+        configuration (BackendConfiguration | dict): Optional backend configuration.
+        input_diff_mode (InputDiffMode): The differentiation mode for the input tensor.
+
+    Returns:
+        A QNN object.
+
+    Raises:
+        ValueError: If the observable configuration is not provided.
+
+    Example:
+    ```python exec="on" source="material-block" result="json"
+    import torch
+    from qadence.ml_tools.config import AnsatzConfig, FeatureMapConfig
+    from qadence.ml_tools import QNN
+    from qadence.constructors import ObservableConfig
+    from qadence.operations import Z
+    from qadence.types import (
+        AnsatzType, BackendName, BasisSet, ObservableTransform, ReuploadScaling, Strategy
+    )
+
+    register = 4
+    obs_config = ObservableConfig(
+        detuning=Z,
+        scale=5.0,
+        shift=0.0,
+        transformation_type=ObservableTransform.SCALE,
+        trainable_transform=None,
+    )
+    fm_config = FeatureMapConfig(
+        num_features=2,
+        inputs=["x", "y"],
+        basis_set=BasisSet.FOURIER,
+        reupload_scaling=ReuploadScaling.CONSTANT,
+        feature_range={
+            "x": (-1.0, 1.0),
+            "y": (0.0, 1.0),
+        },
+    )
+    ansatz_config = AnsatzConfig(
+        depth=2,
+        ansatz_type=AnsatzType.HEA,
+        ansatz_strategy=Strategy.DIGITAL,
+    )
+
+    qnn = QNN.from_configs(
+        register, obs_config, fm_config, ansatz_config, backend=BackendName.PYQTORCH
+    )
+
+    x = torch.rand(2, 2)
+    y = qnn(x)
+    print(str(y)) # markdown-exec: hide
+    ```
+    """
+    from .constructors import build_qnn_from_configs
+
+    return build_qnn_from_configs(
+        register=register,
+        observable_config=obs_config,
+        fm_config=fm_config,
+        ansatz_config=ansatz_config,
+        backend=backend,
+        diff_mode=diff_mode,
+        measurement=measurement,
+        noise=noise,
+        configuration=configuration,
+        input_diff_mode=input_diff_mode,
+    )
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v1.7.4/api/operations/index.html b/v1.7.4/api/operations/index.html new file mode 100644 index 000000000..5fd89338a --- /dev/null +++ b/v1.7.4/api/operations/index.html @@ -0,0 +1,5291 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Operations - Qadence + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Operations

+ +

Operations are common PrimitiveBlocks, these are often +called gates elsewhere.

+

Constant blocks

+ + +
+ + + +

+ X(target) + +

+ + +
+

+ Bases: PrimitiveBlock

+ + +

The X gate.

+ +
+ Source code in qadence/operations/primitive.py +
def __init__(self, target: int):
+    super().__init__((target,))
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ Y(target) + +

+ + +
+

+ Bases: PrimitiveBlock

+ + +

The Y gate.

+ +
+ Source code in qadence/operations/primitive.py +
def __init__(self, target: int):
+    super().__init__((target,))
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ Z(target) + +

+ + +
+

+ Bases: PrimitiveBlock

+ + +

The Z gate.

+ +
+ Source code in qadence/operations/primitive.py +
def __init__(self, target: int):
+    super().__init__((target,))
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ I(target) + +

+ + +
+

+ Bases: PrimitiveBlock

+ + +

The identity gate.

+ +
+ Source code in qadence/operations/primitive.py +
def __init__(self, target: int):
+    super().__init__((target,))
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ H(target) + +

+ + +
+

+ Bases: PrimitiveBlock

+ + +

The Hadamard or H gate.

+ +
+ Source code in qadence/operations/primitive.py +
def __init__(self, target: int):
+    self.generator = (1 / np.sqrt(2)) * (X(target) + Z(target) - np.sqrt(2) * I(target))
+    super().__init__((target,))
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ S(target) + +

+ + +
+

+ Bases: PrimitiveBlock

+ + +

The S / Phase gate.

+ +
+ Source code in qadence/operations/primitive.py +
def __init__(self, target: int):
+    self.generator = I(target) - Z(target)
+    super().__init__((target,))
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ SDagger(target) + +

+ + +
+

+ Bases: PrimitiveBlock

+ + +

The Hermitian adjoint/conjugate transpose of the S / Phase gate.

+ +
+ Source code in qadence/operations/primitive.py +
def __init__(self, target: int):
+    self.generator = I(target) - Z(target)
+    super().__init__((target,))
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ SWAP(control, target) + +

+ + +
+

+ Bases: PrimitiveBlock

+ + +

The SWAP gate.

+ +
+ Source code in qadence/operations/primitive.py +
def __init__(self, control: int, target: int) -> None:
+    a11 = 0.5 * (Z(control) - I(control))
+    a22 = -0.5 * (Z(target) + I(target))
+    a12 = 0.5 * (chain(X(control), Z(control)) + X(control))
+    a21 = 0.5 * (chain(Z(target), X(target)) + X(target))
+    self.generator = (
+        kron(-1.0 * a22, a11) + kron(-1.0 * a11, a22) + kron(a12, a21) + kron(a21, a12)
+    )
+    super().__init__((control, target))
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ T(target) + +

+ + +
+

+ Bases: PrimitiveBlock

+ + +

The T gate.

+ +
+ Source code in qadence/operations/primitive.py +
def __init__(self, target: int):
+    self.generator = I(target) - Z(target)
+    super().__init__((target,))
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ TDagger(target) + +

+ + +
+

+ Bases: PrimitiveBlock

+ + +

The Hermitian adjoint/conjugate transpose of the T gate.

+ +
+ Source code in qadence/operations/primitive.py +
def __init__(self, target: int):
+    self.generator = I(target) - Z(target)
+    super().__init__((target,))
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ CNOT(control, target) + +

+ + +
+

+ Bases: ControlBlock

+ + +

The CNot, or CX, gate.

+ +
+ Source code in qadence/operations/control_ops.py +
38
+39
+40
def __init__(self, control: int, target: int) -> None:
+    self.generator = kron(N(control), X(target) - I(target))
+    super().__init__((control,), X(target))
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+

CY gate not implemented

+
+ + +
+ + + +

+ CZ(control, target) + +

+ + +
+

+ Bases: MCZ

+ + +

The CZ gate.

+ +
+ Source code in qadence/operations/control_ops.py +
def __init__(self, control: int, target: int) -> None:
+    super().__init__((control,), target)
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ CPHASE(control, target, parameter) + +

+ + +
+

+ Bases: MCPHASE

+ + +

The CPHASE gate.

+ +
+ Source code in qadence/operations/control_ops.py +
def __init__(
+    self,
+    control: int,
+    target: int,
+    parameter: Parameter | TNumber | sympy.Expr | str,
+):
+    super().__init__((control,), target, parameter)
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +

+

Parametrized blocks

+ + +
+ + + +

+ RX(target, parameter) + +

+ + +
+

+ Bases: ParametricBlock

+ + +

The Rx gate.

+ +
+ Source code in qadence/operations/parametric.py +
59
+60
+61
+62
+63
+64
+65
def __init__(self, target: int, parameter: Parameter | TParameter | ParamMap):
+    # TODO: should we give them more meaningful names? like 'angle'?
+    self.parameters = (
+        parameter if isinstance(parameter, ParamMap) else ParamMap(parameter=parameter)
+    )
+    self.generator = X(target)
+    super().__init__((target,))
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ RY(target, parameter) + +

+ + +
+

+ Bases: ParametricBlock

+ + +

The Ry gate.

+ +
+ Source code in qadence/operations/parametric.py +
87
+88
+89
+90
+91
+92
def __init__(self, target: int, parameter: Parameter | TParameter | ParamMap):
+    self.parameters = (
+        parameter if isinstance(parameter, ParamMap) else ParamMap(parameter=parameter)
+    )
+    self.generator = Y(target)
+    super().__init__((target,))
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ RZ(target, parameter) + +

+ + +
+

+ Bases: ParametricBlock

+ + +

The Rz gate.

+ +
+ Source code in qadence/operations/parametric.py +
def __init__(self, target: int, parameter: Parameter | TParameter | ParamMap):
+    self.parameters = (
+        parameter if isinstance(parameter, ParamMap) else ParamMap(parameter=parameter)
+    )
+    self.generator = Z(target)
+    super().__init__((target,))
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ CRX(control, target, parameter) + +

+ + +
+

+ Bases: MCRX

+ + +

The CRX gate.

+ +
+ Source code in qadence/operations/control_ops.py +
def __init__(
+    self,
+    control: int,
+    target: int,
+    parameter: Parameter | TNumber | sympy.Expr | str,
+):
+    super().__init__((control,), target, parameter)
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ CRY(control, target, parameter) + +

+ + +
+

+ Bases: MCRY

+ + +

The CRY gate.

+ +
+ Source code in qadence/operations/control_ops.py +
def __init__(
+    self,
+    control: int,
+    target: int,
+    parameter: TParameter,
+):
+    super().__init__((control,), target, parameter)
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ CRZ(control, target, parameter) + +

+ + +
+

+ Bases: MCRZ

+ + +

The CRZ gate.

+ +
+ Source code in qadence/operations/control_ops.py +
def __init__(
+    self,
+    control: int,
+    target: int,
+    parameter: Parameter | TNumber | sympy.Expr | str,
+):
+    super().__init__((control,), target, parameter)
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ PHASE(target, parameter) + +

+ + +
+

+ Bases: ParametricBlock

+ + +

The Parametric Phase / S gate.

+ +
+ Source code in qadence/operations/parametric.py +
35
+36
+37
+38
def __init__(self, target: int, parameter: Parameter | TNumber | sympy.Expr | str):
+    self.parameters = ParamMap(parameter=parameter)
+    self.generator = I(target) - Z(target)
+    super().__init__((target,))
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +

+

Hamiltonian Evolution

+ + +
+ + + +

+ HamEvo(generator, parameter, qubit_support=None, duration=None) + +

+ + +
+

+ Bases: TimeEvolutionBlock

+ + +

A block implementing the Hamiltonian evolution operation H where:

+
H = exp(-iG, t)
+
+

where G represents a square generator and t represents the time parameter +which can be parametrized.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
generator +
+

Either a AbstractBlock, torch.Tensor or numpy.ndarray.

+
+

+ + TYPE: + Union[TGenerator, AbstractBlock] + +

+
parameter +
+

A scalar or vector of numeric or torch.Tensor type.

+
+

+ + TYPE: + TParameter + +

+
qubit_support +
+

The qubits on which the evolution will be performed on.

+
+

+ + TYPE: + tuple[int, ...] + + + DEFAULT: + None + +

+
duration +
+

duration of evolution in case of time-dependent generator

+
+

+ + TYPE: + float | None + + + DEFAULT: + None + +

+
+

Examples:

+
from qadence import RX, HamEvo, run, PI
+import torch
+hevo = HamEvo(generator=RX(0, PI), parameter=torch.rand(2))
+print(run(hevo))
+# Now lets use a torch.Tensor as a generator, Now we have to pass the support
+gen = torch.rand(2,2, dtype=torch.complex128)
+hevo = HamEvo(generator=gen, parameter=torch.rand(2), qubit_support=(0,))
+print(run(hevo))
+
+
+ +
tensor([[ 1.0000-6.4237e-20j, -0.0010+6.7389e-23j],
+        [ 1.0015-3.3120e-18j, -0.0540+1.7871e-19j]])
+tensor([[1.0007-0.0026j, 0.0048-0.0048j],
+        [0.8092-0.4102j, 0.4762-0.7235j]])
+
+ +
+ +
+ Source code in qadence/operations/ham_evo.py +
def __init__(
+    self,
+    generator: Union[TGenerator, AbstractBlock],
+    parameter: TParameter,
+    qubit_support: tuple[int, ...] = None,
+    duration: float | None = None,
+):
+    gen_exprs = {}
+    if qubit_support is None and not isinstance(generator, AbstractBlock):
+        raise ValueError("You have to supply a qubit support for non-block generators.")
+    super().__init__(qubit_support if qubit_support else generator.qubit_support)
+    if isinstance(generator, AbstractBlock):
+        qubit_support = generator.qubit_support
+        if generator.is_parametric:
+            gen_exprs = {str(e): e for e in expressions(generator)}
+
+            if generator.is_time_dependent and duration is None:
+                raise ValueError("For time-dependent generators, a duration must be specified.")
+
+    elif isinstance(generator, torch.Tensor):
+        msg = "Please provide a square generator."
+        if len(generator.shape) == 2:
+            assert generator.shape[0] == generator.shape[1], msg
+        elif len(generator.shape) == 3:
+            assert generator.shape[1] == generator.shape[2], msg
+            assert generator.shape[0] == 1, "Qadence doesnt support batched generators."
+        else:
+            raise TypeError(
+                "Only 2D or 3D generators are supported.\
+                            In case of a 3D generator, the batch dim\
+                            is expected to be at dim 0."
+            )
+        gen_exprs = {str(generator.__hash__()): generator}
+    elif isinstance(generator, (sympy.Basic, sympy.Array)):
+        gen_exprs = {str(generator): generator}
+    else:
+        raise TypeError(
+            f"Generator of type {type(generator)} not supported.\
+                        If you're using a numpy.ndarray, please cast it to a torch tensor."
+        )
+    ps = {"parameter": Parameter(parameter), **gen_exprs}
+    self.parameters = ParamMap(**ps)
+    self.generator = generator
+    self.duration = duration
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ digital_decomposition(approximation=LTSOrder.ST4) + +

+ + +
+ +

Decompose the Hamiltonian evolution into digital gates.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
approximation +
+

Choose the type of decomposition. Defaults to "st4". +Available types are: +* 'basic' = apply first-order Trotter formula and decompose each term of + the exponential into digital gates. It is exact only if applied to an + operator whose terms are mutually commuting. +* 'st2' = Trotter-Suzuki 2nd order formula for approximating non-commuting + Hamiltonians. +* 'st4' = Trotter-Suzuki 4th order formula for approximating non-commuting + Hamiltonians.

+
+

+ + TYPE: + str + + + DEFAULT: + ST4 + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ AbstractBlock + +
+

a block with the digital decomposition

+
+

+ + TYPE: + AbstractBlock + +

+
+ +
+ Source code in qadence/operations/ham_evo.py +
def digital_decomposition(self, approximation: LTSOrder = LTSOrder.ST4) -> AbstractBlock:
+    """Decompose the Hamiltonian evolution into digital gates.
+
+    Args:
+        approximation (str, optional): Choose the type of decomposition. Defaults to "st4".
+            Available types are:
+            * 'basic' = apply first-order Trotter formula and decompose each term of
+                the exponential into digital gates. It is exact only if applied to an
+                operator whose terms are mutually commuting.
+            * 'st2' = Trotter-Suzuki 2nd order formula for approximating non-commuting
+                Hamiltonians.
+            * 'st4' = Trotter-Suzuki 4th order formula for approximating non-commuting
+                Hamiltonians.
+
+    Returns:
+        AbstractBlock: a block with the digital decomposition
+    """
+
+    # psi(t) = exp(-i * H * t * psi0)
+    # psi(t) = exp(-i * lambda * t * psi0)
+    # H = sum(Paulin) + sum(Pauli1*Pauli2)
+    logger.info("Quantum simulation of the time-independent Schrödinger equation.")
+
+    blocks = []
+
+    # how to change the type/dict to enum effectively
+
+    # when there is a term including non-commuting matrices use st2 or st4
+
+    # 1) should check that the given generator respects the constraints
+    # single-qubit gates
+
+    assert isinstance(
+        self.generator, AbstractBlock
+    ), "Only a generator represented as a block can be decomposed"
+
+    if block_is_qubit_hamiltonian(self.generator):
+        try:
+            block_is_commuting_hamiltonian(self.generator)
+            approximation = LTSOrder.BASIC  # use the simpler approach if the H is commuting
+        except TypeError:
+            logger.warning(
+                """Non-commuting terms in the Pauli operator.
+                The Suzuki-Trotter approximation is applied."""
+            )
+
+        blocks.extend(
+            lie_trotter_suzuki(
+                block=self.generator,
+                parameter=self.parameters.parameter,
+                order=LTSOrder[approximation],
+            )
+        )
+
+        # 2) return an AbstractBlock instance with the set of gates
+        # resulting from the decomposition
+
+        return chain(*blocks)
+    else:
+        raise NotImplementedError(
+            "The current digital decomposition can be applied only to Pauli Hamiltonians."
+        )
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ AnalogSWAP(control, target, parameter=3 * PI / 4) + +

+ + +
+

+ Bases: HamEvo

+ + +

Single time-independent Hamiltonian evolution over a Rydberg Ising.

+

hamiltonian yielding a SWAP (up to global phase).

+

Derived from +Bapat et al. +where it is applied to XX-type Hamiltonian

+ +
+ Source code in qadence/operations/analog.py +
48
+49
+50
+51
+52
+53
+54
+55
+56
def __init__(self, control: int, target: int, parameter: TParameter = 3 * PI / 4):
+    rydberg_ising_hamiltonian_generator = (
+        4.0 * kron((I(control) - Z(control)) / 2.0, (I(target) - Z(target)) / 2.0)
+        + (2.0 / 3.0) * np.sqrt(2.0) * X(control)
+        + (2.0 / 3.0) * np.sqrt(2.0) * X(target)
+        + (1.0 + np.sqrt(5.0) / 3) * Z(control)
+        + (1.0 + np.sqrt(5.0) / 3) * Z(target)
+    )
+    super().__init__(rydberg_ising_hamiltonian_generator, parameter, (control, target))
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+

AnalogSWAP should be turned into a proper analog block

+
+
+

Analog blocks

+ + +
+ + +

+ AnalogRX(angle, qubit_support='global', add_pattern=True) + +

+ + +
+ +

Analog X rotation.

+

Shorthand for AnalogRot:

+
φ=2.4; Ω=π; t = φ/Ω * 1000
+AnalogRot(duration=t, omega=Ω)
+
+ + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
angle +
+

Rotation angle [rad]

+
+

+ + TYPE: + float | str | Parameter + +

+
qubit_support +
+

Defines the (local/global) qubit support

+
+

+ + TYPE: + str | QubitSupport | Tuple + + + DEFAULT: + 'global' + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + ConstantAnalogRotation + + +
+

ConstantAnalogRotation

+
+
+ +
+ Source code in qadence/operations/analog.py +
def AnalogRX(
+    angle: float | str | Parameter,
+    qubit_support: str | QubitSupport | Tuple = "global",
+    add_pattern: bool = True,
+) -> ConstantAnalogRotation:
+    """Analog X rotation.
+
+    Shorthand for [`AnalogRot`][qadence.operations.AnalogRot]:
+
+    ```python
+    φ=2.4; Ω=π; t = φ/Ω * 1000
+    AnalogRot(duration=t, omega=Ω)
+    ```
+
+    Arguments:
+        angle: Rotation angle [rad]
+        qubit_support: Defines the (local/global) qubit support
+
+    Returns:
+        ConstantAnalogRotation
+    """
+    return _analog_rot(angle, qubit_support, phase=0, add_pattern=add_pattern)
+
+
+
+ +
+ +
+ + +

+ AnalogRY(angle, qubit_support='global', add_pattern=True) + +

+ + +
+ +

Analog Y rotation.

+

Shorthand for AnalogRot:

+

φ=2.4; Ω=π; t = φ/Ω * 1000
+AnalogRot(duration=t, omega=Ω, phase=-π/2)
+
+Arguments: + angle: Rotation angle [rad] + qubit_support: Defines the (local/global) qubit support

+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + ConstantAnalogRotation + + +
+

ConstantAnalogRotation

+
+
+ +
+ Source code in qadence/operations/analog.py +
def AnalogRY(
+    angle: float | str | Parameter,
+    qubit_support: str | QubitSupport | Tuple = "global",
+    add_pattern: bool = True,
+) -> ConstantAnalogRotation:
+    """Analog Y rotation.
+
+    Shorthand for [`AnalogRot`][qadence.operations.AnalogRot]:
+
+    ```python
+    φ=2.4; Ω=π; t = φ/Ω * 1000
+    AnalogRot(duration=t, omega=Ω, phase=-π/2)
+    ```
+    Arguments:
+        angle: Rotation angle [rad]
+        qubit_support: Defines the (local/global) qubit support
+
+    Returns:
+        ConstantAnalogRotation
+    """
+    return _analog_rot(angle, qubit_support, phase=-PI / 2, add_pattern=add_pattern)
+
+
+
+ +
+ +
+ + +

+ AnalogRZ(angle, qubit_support='global', add_pattern=True) + +

+ + +
+ +

Analog Z rotation. Shorthand for AnalogRot: +

φ=2.4; δ=π; t = φ/δ * 100)
+AnalogRot(duration=t, delta=δ, phase=π/2)
+

+ +
+ Source code in qadence/operations/analog.py +
def AnalogRZ(
+    angle: float | str | Parameter,
+    qubit_support: str | QubitSupport | Tuple = "global",
+    add_pattern: bool = True,
+) -> ConstantAnalogRotation:
+    """Analog Z rotation. Shorthand for [`AnalogRot`][qadence.operations.AnalogRot]:
+    ```
+    φ=2.4; δ=π; t = φ/δ * 100)
+    AnalogRot(duration=t, delta=δ, phase=π/2)
+    ```
+    """
+    q = _cast(QubitSupport, qubit_support)
+    alpha = _cast(Parameter, angle)
+    delta = PI
+    omega = 0
+    duration = alpha / delta * 1000
+    h_norm = sympy.sqrt(omega**2 + delta**2)
+    ps = ParamMap(
+        alpha=alpha, duration=duration, omega=omega, delta=delta, phase=0.0, h_norm=h_norm
+    )
+    return ConstantAnalogRotation(qubit_support=q, parameters=ps, add_pattern=add_pattern)
+
+
+
+ +
+ +
+ + +

+ AnalogRot(duration, omega=0, delta=0, phase=0, qubit_support='global', add_pattern=True) + +

+ + +
+ +

General analog rotation operation.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
duration +
+

Duration of the rotation [ns].

+
+

+ + TYPE: + float | str | Parameter + +

+
omega +
+

Rotation frequency [rad/μs]

+
+

+ + TYPE: + float | str | Parameter + + + DEFAULT: + 0 + +

+
delta +
+

Rotation frequency [rad/μs]

+
+

+ + TYPE: + float | str | Parameter + + + DEFAULT: + 0 + +

+
phase +
+

Phase angle [rad]

+
+

+ + TYPE: + float | str | Parameter + + + DEFAULT: + 0 + +

+
qubit_support +
+

Defines the (local/global) qubit support

+
+

+ + TYPE: + str | QubitSupport | Tuple + + + DEFAULT: + 'global' + +

+
add_pattern +
+

False disables the semi-local addressing pattern +for the execution of this specific block.

+
+

+ + TYPE: + bool + + + DEFAULT: + True + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + ConstantAnalogRotation + + +
+

ConstantAnalogRotation

+
+
+ +
+ Source code in qadence/operations/analog.py +
def AnalogRot(
+    duration: float | str | Parameter,
+    omega: float | str | Parameter = 0,
+    delta: float | str | Parameter = 0,
+    phase: float | str | Parameter = 0,
+    qubit_support: str | QubitSupport | Tuple = "global",
+    add_pattern: bool = True,
+) -> ConstantAnalogRotation:
+    """General analog rotation operation.
+
+    Arguments:
+        duration: Duration of the rotation [ns].
+        omega: Rotation frequency [rad/μs]
+        delta: Rotation frequency [rad/μs]
+        phase: Phase angle [rad]
+        qubit_support: Defines the (local/global) qubit support
+        add_pattern: False disables the semi-local addressing pattern
+            for the execution of this specific block.
+
+    Returns:
+        ConstantAnalogRotation
+    """
+
+    if omega == 0 and delta == 0:
+        raise ValueError("Parameters omega and delta cannot both be 0.")
+
+    q = _cast(QubitSupport, qubit_support)
+    duration = Parameter(duration)
+    omega = Parameter(omega)
+    delta = Parameter(delta)
+    phase = Parameter(phase)
+    h_norm = sympy.sqrt(omega**2 + delta**2)
+    alpha = duration * h_norm / 1000
+    ps = ParamMap(
+        alpha=alpha, duration=duration, omega=omega, delta=delta, phase=phase, h_norm=h_norm
+    )
+    return ConstantAnalogRotation(parameters=ps, qubit_support=q, add_pattern=add_pattern)
+
+
+
+ +
+ +
+ + +

+ AnalogInteraction(duration, qubit_support='global', add_pattern=True) + +

+ + +
+ +

Evolution of the interaction term for a register of qubits.

+

Constructs a InteractionBlock.

+ + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
duration +
+

Time to evolve the interaction for in nanoseconds.

+
+

+ + TYPE: + TNumber | Basic + +

+
qubit_support +
+

Qubits the InteractionBlock is applied to. Can be either +"global" to evolve the interaction block to all qubits or a tuple of integers.

+
+

+ + TYPE: + str | QubitSupport | tuple + + + DEFAULT: + 'global' + +

+
add_pattern +
+

False disables the semi-local addressing pattern +for the execution of this specific block.

+
+

+ + TYPE: + bool + + + DEFAULT: + True + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + InteractionBlock + + +
+

a InteractionBlock

+
+
+ +
+ Source code in qadence/operations/analog.py +
63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
def AnalogInteraction(
+    duration: TNumber | sympy.Basic,
+    qubit_support: str | QubitSupport | tuple = "global",
+    add_pattern: bool = True,
+) -> InteractionBlock:
+    """Evolution of the interaction term for a register of qubits.
+
+    Constructs a [`InteractionBlock`][qadence.blocks.analog.InteractionBlock].
+
+    Arguments:
+        duration: Time to evolve the interaction for in nanoseconds.
+        qubit_support: Qubits the `InteractionBlock` is applied to. Can be either
+            `"global"` to evolve the interaction block to all qubits or a tuple of integers.
+        add_pattern: False disables the semi-local addressing pattern
+            for the execution of this specific block.
+
+    Returns:
+        a `InteractionBlock`
+    """
+    q = _cast(QubitSupport, qubit_support)
+    ps = ParamMap(duration=duration)
+    return InteractionBlock(parameters=ps, qubit_support=q, add_pattern=add_pattern)
+
+
+
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v1.7.4/api/parameters/index.html b/v1.7.4/api/parameters/index.html new file mode 100644 index 000000000..985bb04cf --- /dev/null +++ b/v1.7.4/api/parameters/index.html @@ -0,0 +1,3793 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Parameters - Qadence + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Parameters

+ +

Parameters

+ + +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ ParamMap(**kwargs) + +

+ + +
+ + +

Connects UUIDs of parameters to their expressions and names.

+

This class is not user-facing +and only needed for more complex block definitions. It provides convenient access to +expressions/UUIDs/names needed in different backends.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
kwargs +
+

Parameters.

+
+

+ + TYPE: + str | TNumber | Tensor | Basic | Parameter + + + DEFAULT: + {} + +

+
+

Example: +

import sympy
+from qadence.parameters import ParamMap
+
+(x,y) = sympy.symbols("x y")
+ps = ParamMap(omega=2.0, duration=x+y)
+
+print(f"{ps.names() = }")
+print(f"{ps.expressions() = }")
+print(f"{ps.uuids() = }")
+
+
+ +
ps.names() = dict_keys(['omega', 'duration'])
+ps.expressions() = dict_values([2.00000000000000, x + y])
+ps.uuids() = dict_keys(['712c506d-a206-41d4-89c0-af58a5e1bd92', '71f17219-a0a1-44df-9c28-385ec699ac82'])
+
+ +

+ +
+ Source code in qadence/parameters.py +
def __init__(self, **kwargs: str | TNumber | Tensor | Basic | Parameter):
+    self._name_dict: dict[str, tuple[str, Basic]] = {}
+    self._uuid_dict: dict[str, str] = {}
+    for name, v in kwargs.items():
+        param = v if isinstance(v, sympy.Basic) else Parameter(v)
+        uuid = str(uuid4())
+        self._name_dict[name] = (uuid, param)
+        self._uuid_dict[uuid] = param
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ Parameter + + +

+ + +
+

+ Bases: Symbol

+ + +

A wrapper on top of sympy.Symbol.

+

Includes two additional keywords: trainable and value. +This class is to define both feature parameter and variational parameters.

+ + + + +
+ + + + + + + +
+ + + +

+ trainable: bool + + + instance-attribute + + +

+ + +
+ +

Trainable parameters are variational parameters.

+

Non-trainable parameters are feature +parameters.

+
+ +
+ +
+ + + +

+ value: TNumber + + + instance-attribute + + +

+ + +
+ +

(Initial) value of the parameter.

+
+ +
+ + + +
+ + +

+ __new__(name, **assumptions) + +

+ + +
+ +

Arguments:

+
name: When given a string only, the class
+    constructs a trainable Parameter with a a randomly initialized value.
+**assumptions: are passed on to the parent class `sympy.Symbol`. Two new assumption
+    kwargs are supported by this constructor: `trainable: bool`, and `value: TNumber`.
+
+

Example: +

from qadence.parameters import Parameter, VariationalParameter
+
+theta = Parameter("theta")
+print(f"{theta}: trainable={theta.trainable} value={theta.value}")
+assert not theta.is_number
+
+# you can specify both trainable/value in the constructor
+theta = Parameter("theta", trainable=True, value=2.0)
+print(f"{theta}: trainable={theta.trainable} value={theta.value}")
+
+# VariationalParameter/FeatureParameter are constructing
+# trainable/untrainable Parameters
+theta = VariationalParameter("theta", value=2.0)
+assert theta == Parameter("theta", trainable=True, value=2.0)
+
+# When provided with a numeric type, Parameter constructs a sympy numeric type":
+constant_zero = Parameter(0)
+assert constant_zero.is_number
+
+# When passed a Parameter or a sympy expression, it just returns it.
+expr = Parameter("x") * Parameter("y")
+print(f"{expr=} : {expr.free_symbols}")
+
+
+ +
theta: trainable=True value=0.27488978038386536
+theta: trainable=True value=2.0
+expr=x*y : {y, x}
+
+ +

+ +
+ Source code in qadence/parameters.py +
def __new__(
+    cls, name: str | TNumber | Tensor | Basic | Parameter, **assumptions: Any
+) -> Parameter | Basic | Expr | Array:
+    """
+    Arguments:
+
+        name: When given a string only, the class
+            constructs a trainable Parameter with a a randomly initialized value.
+        **assumptions: are passed on to the parent class `sympy.Symbol`. Two new assumption
+            kwargs are supported by this constructor: `trainable: bool`, and `value: TNumber`.
+
+    Example:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.parameters import Parameter, VariationalParameter
+
+    theta = Parameter("theta")
+    print(f"{theta}: trainable={theta.trainable} value={theta.value}")
+    assert not theta.is_number
+
+    # you can specify both trainable/value in the constructor
+    theta = Parameter("theta", trainable=True, value=2.0)
+    print(f"{theta}: trainable={theta.trainable} value={theta.value}")
+
+    # VariationalParameter/FeatureParameter are constructing
+    # trainable/untrainable Parameters
+    theta = VariationalParameter("theta", value=2.0)
+    assert theta == Parameter("theta", trainable=True, value=2.0)
+
+    # When provided with a numeric type, Parameter constructs a sympy numeric type":
+    constant_zero = Parameter(0)
+    assert constant_zero.is_number
+
+    # When passed a Parameter or a sympy expression, it just returns it.
+    expr = Parameter("x") * Parameter("y")
+    print(f"{expr=} : {expr.free_symbols}")
+    ```
+    """
+    p: Parameter
+    if isinstance(name, get_args(TNumber)):
+        return sympify(name)
+    elif isinstance(name, Tensor):
+        if name.numel() == 1:
+            return sympify(name)
+        else:
+            return Array(name.detach().numpy())
+    elif isinstance(name, Parameter):
+        p = super().__new__(cls, name.name, **assumptions)
+        p.name = name.name
+        p.trainable = name.trainable
+        p.value = name.value
+        p.is_time = name.is_time
+        return p
+    elif isinstance(name, (Basic, Expr)):
+        if name.is_number:
+            return sympify(evaluate(name))
+        return name
+    elif isinstance(name, str):
+        p = super().__new__(cls, name, **assumptions)
+        p.trainable = assumptions.get("trainable", True)
+        p.value = assumptions.get("value", None)
+        p.is_time = assumptions.get("is_time", False)
+        if p.value is None:
+            p.value = rand(1).item()
+        return p
+    else:
+        raise TypeError(f"Parameter does not support type {type(name)}")
+
+
+
+ +
+ + + +
+ +
+ +
+ + +
+ + +

+ FeatureParameter(name, **kwargs) + +

+ + +
+ +

Shorthand for Parameter(..., trainable=False).

+ +
+ Source code in qadence/parameters.py +
def FeatureParameter(name: str, **kwargs: Any) -> Parameter:
+    """Shorthand for `Parameter(..., trainable=False)`."""
+    return Parameter(name, trainable=False, **kwargs)
+
+
+
+ +
+ +
+ + +

+ TimeParameter(name) + +

+ + +
+ +

Shorthand for Parameter(..., trainable=False, is_time=True).

+ +
+ Source code in qadence/parameters.py +
def TimeParameter(name: str) -> Parameter:
+    """Shorthand for `Parameter(..., trainable=False, is_time=True)`."""
+    return Parameter(name, trainable=False, is_time=True)
+
+
+
+ +
+ +
+ + +

+ VariationalParameter(name, **kwargs) + +

+ + +
+ +

Shorthand for Parameter(..., trainable=True).

+ +
+ Source code in qadence/parameters.py +
def VariationalParameter(name: str, **kwargs: Any) -> Parameter:
+    """Shorthand for `Parameter(..., trainable=True)`."""
+    return Parameter(name, trainable=True, **kwargs)
+
+
+
+ +
+ +
+ + +

+ evaluate(expr, values={}, as_torch=False) + +

+ + +
+ +

Arguments:

+
expr: An expression consisting of Parameters.
+values: values dict which contains values for the Parameters,
+    if empty, Parameter.value will be used.
+as_torch: Whether to retrieve a torch-differentiable expression result.
+
+

Example: +

from qadence.parameters import Parameter, evaluate
+
+expr = Parameter("x") * Parameter("y")
+
+# Unless specified, Parameter initialized random values
+# Lets evaluate this expression and see what the result is
+res = evaluate(expr)
+print(res)
+
+# We can also evaluate the expr using a custom dict
+d = {"x": 1, "y":2}
+res = evaluate(expr, d)
+print(res)
+
+# Lastly, if we want a differentiable result, lets put the as_torch flag
+res = evaluate(expr, d, as_torch=True)
+print(res)
+
+
+ +
0.1692043236060967
+2.0
+tensor([2])
+
+ +

+ +
+ Source code in qadence/parameters.py +
def evaluate(expr: Expr, values: dict = {}, as_torch: bool = False) -> TNumber | Tensor:
+    """
+    Arguments:
+
+        expr: An expression consisting of Parameters.
+        values: values dict which contains values for the Parameters,
+            if empty, Parameter.value will be used.
+        as_torch: Whether to retrieve a torch-differentiable expression result.
+
+    Example:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.parameters import Parameter, evaluate
+
+    expr = Parameter("x") * Parameter("y")
+
+    # Unless specified, Parameter initialized random values
+    # Lets evaluate this expression and see what the result is
+    res = evaluate(expr)
+    print(res)
+
+    # We can also evaluate the expr using a custom dict
+    d = {"x": 1, "y":2}
+    res = evaluate(expr, d)
+    print(res)
+
+    # Lastly, if we want a differentiable result, lets put the as_torch flag
+    res = evaluate(expr, d, as_torch=True)
+    print(res)
+    ```
+    """
+    res: Basic
+    res_value: TNumber | Tensor
+    query: dict[Parameter, TNumber | Tensor] = {}
+    if isinstance(expr, Array):
+        return Tensor(expr.tolist())
+    else:
+        if not expr.is_number:
+            for s in expr.free_symbols:
+                if s.name in values.keys():
+                    query[s] = values[s.name]
+                elif hasattr(s, "value"):
+                    query[s] = s.value
+                else:
+                    raise ValueError(f"No value provided for symbol {s.name}")
+        if as_torch:
+            res_value = make_differentiable(expr)(**{s.name: tensor(v) for s, v in query.items()})
+        else:
+            res = expr.subs(query)
+            res_value = sympy_to_numeric(res)
+        return res_value
+
+
+
+ +
+ +
+ + +

+ extract_original_param_entry(param) + +

+ + +
+ +

Given an Expression, what was the original "param" given by the user? It is either.

+

going to be a numeric value, or a sympy Expression (in case a string was given, +it was converted via Parameter("string").

+ +
+ Source code in qadence/parameters.py +
def extract_original_param_entry(
+    param: Expr,
+) -> TNumber | Tensor | Expr:
+    """
+    Given an Expression, what was the original "param" given by the user? It is either.
+
+    going to be a numeric value, or a sympy Expression (in case a string was given,
+    it was converted via Parameter("string").
+    """
+    return param if not param.is_number else evaluate(param)
+
+
+
+ +
+ + + +
+ +
+ +

Parameter embedding

+ + +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ embedding(block, to_gate_params=False, engine=Engine.TORCH) + +

+ + +
+ +

Construct embedding function which maps user-facing parameters to either expression-level.

+

parameters or gate-level parameters. The constructed embedding function has the signature:

+
 embedding_fn(params: ParamDictType, inputs: ParamDictType) -> ParamDictType:
+
+

which means that it maps the variational parameter dict params and the feature parameter +dict inputs to one new parameter dict embedded_dict which holds all parameters that are +needed to execute a circuit on a given backend. There are two different modes for this +mapping:

+
    +
  • Expression-level parameters: For AD-based optimization. For every unique expression we end + up with one entry in the embedded dict: + len(embedded_dict) == len(unique_parameter_expressions).
  • +
  • Gate-level parameters: For PSR-based optimization or real devices. One parameter for each + gate parameter, regardless if they are based on the same expression. len(embedded_dict) == + len(parametric_gates). This is needed because PSR requires to shift the angles of every + gate where the same parameter appears.
  • +
+ + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
block +
+

parametrized block into which we want to embed parameters.

+
+

+ + TYPE: + AbstractBlock + +

+
to_gate_params +
+

A boolean flag whether to generate gate-level parameters or +expression-level parameters.

+
+

+ + TYPE: + bool + + + DEFAULT: + False + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + tuple[ParamDictType, Callable[[ParamDictType, ParamDictType], ParamDictType]] + + +
+

A tuple with variational parameter dict and the embedding function.

+
+
+ +
+ Source code in qadence/blocks/embedding.py +
def embedding(
+    block: AbstractBlock, to_gate_params: bool = False, engine: Engine = Engine.TORCH
+) -> tuple[ParamDictType, Callable[[ParamDictType, ParamDictType], ParamDictType],]:
+    """Construct embedding function which maps user-facing parameters to either *expression-level*.
+
+    parameters or *gate-level* parameters. The constructed embedding function has the signature:
+
+         embedding_fn(params: ParamDictType, inputs: ParamDictType) -> ParamDictType:
+
+    which means that it maps the *variational* parameter dict `params` and the *feature* parameter
+    dict `inputs` to one new parameter dict `embedded_dict` which holds all parameters that are
+    needed to execute a circuit on a given backend. There are two different *modes* for this
+    mapping:
+
+    - *Expression-level* parameters: For AD-based optimization. For every unique expression we end
+      up with one entry in the embedded dict:
+      `len(embedded_dict) == len(unique_parameter_expressions)`.
+    - *Gate-level* parameters: For PSR-based optimization or real devices. One parameter for each
+      gate parameter, regardless if they are based on the same expression. `len(embedded_dict) ==
+      len(parametric_gates)`. This is needed because PSR requires to shift the angles of **every**
+      gate where the same parameter appears.
+
+    Arguments:
+        block: parametrized block into which we want to embed parameters.
+        to_gate_params: A boolean flag whether to generate gate-level parameters or
+            expression-level parameters.
+
+    Returns:
+        A tuple with variational parameter dict and the embedding function.
+    """
+    concretize_parameter = _concretize_parameter(engine)
+    if engine == Engine.TORCH:
+        cast_dtype = tensor
+    else:
+        from jax.numpy import array
+
+        cast_dtype = array
+
+    unique_expressions = unique(expressions(block))
+    unique_symbols = [p for p in unique(parameters(block)) if not isinstance(p, sympy.Array)]
+    unique_const_matrices = [e for e in unique_expressions if isinstance(e, sympy.Array)]
+    unique_expressions = [e for e in unique_expressions if not isinstance(e, sympy.Array)]
+
+    # NOTE
+    # there are 3 kinds of parameters in qadence
+    # - non-trainable which are considered as inputs for classical data
+    # - trainable which are the variational parameters to be optimized
+    # - fixed: which are non-trainable parameters with fixed value (e.g. pi/2)
+    #
+    # both non-trainable and trainable parameters can have the same element applied
+    # to different operations in the quantum circuit, e.g. assigning the same parameter
+    # to multiple gates.
+    non_numeric_symbols = [p for p in unique_symbols if not p.is_number]
+    trainable_symbols = [p for p in non_numeric_symbols if p.trainable]
+    constant_expressions = [expr for expr in unique_expressions if expr.is_number]
+    # we dont need to care about constant symbols if they are contained in an symbolic expression
+    # we only care about gate params which are ONLY a constant
+
+    embeddings: dict[sympy.Expr, DifferentiableExpression] = {
+        expr: make_differentiable(expr=expr, engine=engine)
+        for expr in unique_expressions
+        if not expr.is_number
+    }
+
+    uuid_to_expr = uuid_to_expression(block)
+
+    def embedding_fn(params: ParamDictType, inputs: ParamDictType) -> ParamDictType:
+        embedded_params: dict[sympy.Expr, ArrayLike] = {}
+        for expr, fn in embeddings.items():
+            angle: ArrayLike
+            values = {}
+            for symbol in expr.free_symbols:
+                if not symbol.is_time:
+                    if symbol.name in inputs:
+                        value = inputs[symbol.name]
+                    elif symbol.name in params:
+                        value = params[symbol.name]
+                    else:
+                        msg_trainable = "Trainable" if symbol.trainable else "Non-trainable"
+                        raise KeyError(
+                            f"{msg_trainable} parameter '{symbol.name}' not found in the "
+                            f"inputs list: {list(inputs.keys())} nor the "
+                            f"params list: {list(params.keys())}."
+                        )
+                    values[symbol.name] = value
+                else:
+                    values[symbol.name] = tensor(1.0)
+            angle = fn(**values)
+            # do not reshape parameters which are multi-dimensional
+            # tensors, such as for example generator matrices
+            if not len(angle.squeeze().shape) > 1:
+                angle = angle.reshape(-1)
+            embedded_params[expr] = angle
+
+        for e in constant_expressions + unique_const_matrices:
+            embedded_params[e] = params[stringify(e)]
+
+        if to_gate_params:
+            gate_lvl_params: ParamDictType = {}
+            for uuid, e in uuid_to_expr.items():
+                gate_lvl_params[uuid] = embedded_params[e]
+            return gate_lvl_params
+        else:
+            out = {stringify(k): v for k, v in embedded_params.items()}
+            out.update({"orig_param_values": inputs})
+            return out
+
+    params: ParamDictType
+    params = {
+        p.name: concretize_parameter(value=p.value, trainable=True) for p in trainable_symbols
+    }
+    params.update(
+        {
+            stringify(expr): concretize_parameter(value=evaluate(expr), trainable=False)
+            for expr in constant_expressions
+        }
+    )
+    params.update(
+        {
+            stringify(expr): cast_dtype(nparray(expr.tolist(), dtype=npcdouble))
+            for expr in unique_const_matrices
+        }
+    )
+    return params, embedding_fn
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v1.7.4/api/quantumcircuit/index.html b/v1.7.4/api/quantumcircuit/index.html new file mode 100644 index 000000000..37486742b --- /dev/null +++ b/v1.7.4/api/quantumcircuit/index.html @@ -0,0 +1,2990 @@ + + + + + + + + + + + + + + + + + + + + + + + + + QuantumCircuit - Qadence + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

QuantumCircuit

+ +

QuantumCircuit

+

The abstract QuantumCircuit is the key object in Qadence, as it is what can be executed.

+ + +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ QuantumCircuit(support, *blocks) + + + dataclass + + +

+ + +
+ + +

An abstract QuantumCircuit instance.

+

It needs to be passed to a quantum backend for execution.

+ +

Arguments:

+
support: `Register` or number of qubits. If an integer is provided, a register is
+    constructed with `Register.all_to_all(x)`
+*blocks: (Possibly multiple) blocks to construct the circuit from.
+
+ +
+ Source code in qadence/circuit.py +
32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
def __init__(self, support: int | Register, *blocks: AbstractBlock):
+    """
+    Arguments:
+
+        support: `Register` or number of qubits. If an integer is provided, a register is
+            constructed with `Register.all_to_all(x)`
+        *blocks: (Possibly multiple) blocks to construct the circuit from.
+    """
+    self.block = chain(*blocks) if len(blocks) != 1 else blocks[0]
+    self.register = Register(support) if isinstance(support, int) else support
+
+    global_block = isinstance(self.block, AnalogBlock) and self.block.qubit_support.is_global
+    if not global_block and len(self.block) and self.block.n_qubits > self.register.n_qubits:
+        raise ValueError(
+            f"Register with {self.register.n_qubits} qubits is too small for the "
+            f"given block with {self.block.n_qubits} qubits"
+        )
+
+
+ + + +
+ + + + + + + +
+ + + +

+ unique_parameters: list[Parameter] + + + property + + +

+ + +
+ +

Return the unique parameters in the circuit.

+

These parameters are the actual user-facing parameters which +can be assigned by the user. Multiple gates can contain the +same unique parameter

+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + list[Parameter] + + +
+

list[Parameter]: List of unique parameters in the circuit

+
+
+
+ +
+ + + +
+ + +

+ dagger() + +

+ + +
+ +

Reverse the QuantumCircuit by calling dagger on the block.

+ +
+ Source code in qadence/circuit.py +
def dagger(self) -> QuantumCircuit:
+    """Reverse the QuantumCircuit by calling dagger on the block."""
+    return QuantumCircuit(self.n_qubits, self.block.dagger())
+
+
+
+ +
+ +
+ + +

+ get_blocks_by_tag(tag) + +

+ + +
+ +

Extract one or more blocks using the human-readable tag.

+

This function recursively explores all composite blocks to find +all the occurrences of a certain tag in the blocks.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
tag +
+

the tag to look for

+
+

+ + TYPE: + str + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + list[AbstractBlock] + + +
+

list[AbstractBlock]: The block(s) corresponding to the given tag

+
+
+ +
+ Source code in qadence/circuit.py +
def get_blocks_by_tag(self, tag: str) -> list[AbstractBlock]:
+    """Extract one or more blocks using the human-readable tag.
+
+    This function recursively explores all composite blocks to find
+    all the occurrences of a certain tag in the blocks.
+
+    Args:
+        tag (str): the tag to look for
+
+    Returns:
+        list[AbstractBlock]: The block(s) corresponding to the given tag
+    """
+
+    def _get_block(block: AbstractBlock) -> list[AbstractBlock]:
+        blocks = []
+        if block.tag == tag:
+            blocks += [block]
+        if isinstance(block, CompositeBlock):
+            blocks += flatten(*[_get_block(b) for b in block.blocks])
+        return blocks
+
+    return _get_block(self.block)
+
+
+
+ +
+ +
+ + +

+ parameters() + +

+ + +
+ +

Extract all parameters for primitive blocks in the circuit.

+

Notice that this function returns all the unique Parameters used +in the quantum circuit. These can correspond to constants too.

+ + + + + + + + + + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + list[Parameter | Basic] | list[tuple[Parameter | Basic, ...]] + + +
+

List[tuple[Parameter]]: A list of tuples containing the Parameter

+
+
+ + list[Parameter | Basic] | list[tuple[Parameter | Basic, ...]] + + +
+

instance of each of the primitive blocks in the circuit or, if the flatten

+
+
+ + list[Parameter | Basic] | list[tuple[Parameter | Basic, ...]] + + +
+

flag is set to True, a flattened list of all circuit parameters

+
+
+ +
+ Source code in qadence/circuit.py +
def parameters(self) -> list[Parameter | Basic] | list[tuple[Parameter | Basic, ...]]:
+    """Extract all parameters for primitive blocks in the circuit.
+
+    Notice that this function returns all the unique Parameters used
+    in the quantum circuit. These can correspond to constants too.
+
+    Returns:
+        List[tuple[Parameter]]: A list of tuples containing the Parameter
+        instance of each of the primitive blocks in the circuit or, if the `flatten`
+        flag is set to True, a flattened list of all circuit parameters
+    """
+    return parameters(self.block)
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v1.7.4/api/register/index.html b/v1.7.4/api/register/index.html new file mode 100644 index 000000000..ee95bbe37 --- /dev/null +++ b/v1.7.4/api/register/index.html @@ -0,0 +1,4031 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Register - Qadence + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Register

+ +

Quantum Registers

+ + +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ Register(support, spacing=1.0, device_specs=DEFAULT_DEVICE) + +

+ + +
+ + +

A register of qubits including 2D coordinates.

+

Instantiating the Register class directly is only recommended for building custom registers. +For most uses where a predefined lattice is desired it is recommended to use the various +class methods available, e.g. Register.triangular_lattice.

+ + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
support +
+

A NetworkX graph or number of qubits. Nodes can include a "pos" attribute +such that e.g.: graph.nodes = {0: {"pos": (2,3)}, 1: {"pos": (0,0)}, ...} which +will be used in backends that need qubit coordinates. Passing a number of qubits +calls Register.all_to_all(n_qubits).

+
+

+ + TYPE: + Graph | int + +

+
spacing +
+

Value set as the distance between the two closest qubits. The spacing +argument is also available for all the class method constructors.

+
+

+ + TYPE: + float | None + + + DEFAULT: + 1.0 + +

+
+

Examples: +

from qadence import Register
+
+reg_all = Register.all_to_all(n_qubits = 4)
+reg_line = Register.line(n_qubits = 4)
+reg_circle = Register.circle(n_qubits = 4)
+reg_squre = Register.square(qubits_side = 2)
+reg_rect = Register.rectangular_lattice(qubits_row = 2, qubits_col = 2)
+reg_triang = Register.triangular_lattice(n_cells_row = 2, n_cells_col = 2)
+reg_honey = Register.honeycomb_lattice(n_cells_row = 2, n_cells_col = 2)
+
+
+ + + +

+ +
+ Source code in qadence/register.py +
34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
def __init__(
+    self,
+    support: nx.Graph | int,
+    spacing: float | None = 1.0,
+    device_specs: RydbergDevice = DEFAULT_DEVICE,
+):
+    """
+    A register of qubits including 2D coordinates.
+
+    Instantiating the Register class directly is only recommended for building custom registers.
+    For most uses where a predefined lattice is desired it is recommended to use the various
+    class methods available, e.g. `Register.triangular_lattice`.
+
+    Arguments:
+        support: A NetworkX graph or number of qubits. Nodes can include a `"pos"` attribute
+            such that e.g.: `graph.nodes = {0: {"pos": (2,3)}, 1: {"pos": (0,0)}, ...}` which
+            will be used in backends that need qubit coordinates. Passing a number of qubits
+            calls `Register.all_to_all(n_qubits)`.
+        spacing: Value set as the distance between the two closest qubits. The spacing
+            argument is also available for all the class method constructors.
+
+    Examples:
+    ```python exec="on" source="material-block"
+    from qadence import Register
+
+    reg_all = Register.all_to_all(n_qubits = 4)
+    reg_line = Register.line(n_qubits = 4)
+    reg_circle = Register.circle(n_qubits = 4)
+    reg_squre = Register.square(qubits_side = 2)
+    reg_rect = Register.rectangular_lattice(qubits_row = 2, qubits_col = 2)
+    reg_triang = Register.triangular_lattice(n_cells_row = 2, n_cells_col = 2)
+    reg_honey = Register.honeycomb_lattice(n_cells_row = 2, n_cells_col = 2)
+    ```
+    """
+    if device_specs is not None and not isinstance(device_specs, RydbergDevice):
+        raise ValueError("Device specs are not valid. Please pass a `RydbergDevice` instance.")
+
+    self.device_specs = device_specs
+
+    self.graph = support if isinstance(support, nx.Graph) else alltoall_graph(support)
+
+    if spacing is not None and self.min_distance != 0.0:
+        _scale_node_positions(self.graph, self.min_distance, spacing)
+
+
+ + + +
+ + + + + + + +
+ + + +

+ all_node_pairs: EdgeView + + + property + + +

+ + +
+ +

Return a list of all possible qubit pairs in the register.

+
+ +
+ +
+ + + +

+ coords: dict + + + property + + +

+ + +
+ +

Return the dictionary of qubit coordinates.

+
+ +
+ +
+ + + +

+ distances: dict + + + property + + +

+ + +
+ +

Return a dictionary of distances for all qubit pairs in the register.

+
+ +
+ +
+ + + +

+ edge_distances: dict + + + property + + +

+ + +
+ +

Return a dictionary of distances for the qubit pairs that are.

+

connected by an edge in the underlying NetworkX graph.

+
+ +
+ +
+ + + +

+ edges: EdgeView + + + property + + +

+ + +
+ +

Return the EdgeView of the underlying NetworkX graph.

+
+ +
+ +
+ + + +

+ min_distance: float + + + property + + +

+ + +
+ +

Return the minimum distance between two qubits in the register.

+
+ +
+ +
+ + + +

+ n_qubits: int + + + property + + +

+ + +
+ +

Total number of qubits in the register.

+
+ +
+ +
+ + + +

+ nodes: NodeView + + + property + + +

+ + +
+ +

Return the NodeView of the underlying NetworkX graph.

+
+ +
+ +
+ + + +

+ support: set + + + property + + +

+ + +
+ +

Return the set of qubits in the register.

+
+ +
+ + + +
+ + +

+ all_to_all(n_qubits, spacing=1.0, device_specs=DEFAULT_DEVICE) + + + classmethod + + +

+ + +
+ +

Build a register with an all-to-all connectivity graph.

+

The graph is projected +onto a 2D space and the qubit coordinates are set using a spring layout algorithm.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

Total number of qubits.

+
+

+ + TYPE: + int + +

+
+ +
+ Source code in qadence/register.py +
@classmethod
+def all_to_all(
+    cls,
+    n_qubits: int,
+    spacing: float = 1.0,
+    device_specs: RydbergDevice = DEFAULT_DEVICE,
+) -> Register:
+    """
+    Build a register with an all-to-all connectivity graph.
+
+    The graph is projected
+    onto a 2D space and the qubit coordinates are set using a spring layout algorithm.
+
+    Arguments:
+        n_qubits: Total number of qubits.
+    """
+    return cls(alltoall_graph(n_qubits), spacing, device_specs)
+
+
+
+ +
+ +
+ + +

+ circle(n_qubits, spacing=1.0, device_specs=DEFAULT_DEVICE) + + + classmethod + + +

+ + +
+ +

Build a circle register.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

Total number of qubits.

+
+

+ + TYPE: + int + +

+
+ +
+ Source code in qadence/register.py +
@classmethod
+def circle(
+    cls,
+    n_qubits: int,
+    spacing: float = 1.0,
+    device_specs: RydbergDevice = DEFAULT_DEVICE,
+) -> Register:
+    """
+    Build a circle register.
+
+    Arguments:
+        n_qubits: Total number of qubits.
+    """
+    graph = nx.grid_2d_graph(n_qubits, 1, periodic=True)
+    graph = nx.relabel_nodes(graph, {(i, 0): i for i in range(n_qubits)})
+    coords = nx.circular_layout(graph)
+    values = {i: {"pos": pos} for i, pos in coords.items()}
+    nx.set_node_attributes(graph, values)
+    return cls(graph, spacing, device_specs)
+
+
+
+ +
+ +
+ + +

+ draw(show=True) + +

+ + +
+ +

Draw the underlying NetworkX graph representing the register.

+ +
+ Source code in qadence/register.py +
def draw(self, show: bool = True) -> None:
+    """Draw the underlying NetworkX graph representing the register."""
+    coords = {i: n["pos"] for i, n in self.graph.nodes.items()}
+    nx.draw(self.graph, with_labels=True, pos=coords)
+    if show:
+        plt.gcf().show()
+
+
+
+ +
+ +
+ + +

+ from_coordinates(coords, lattice=LatticeTopology.ARBITRARY, spacing=None, device_specs=DEFAULT_DEVICE) + + + classmethod + + +

+ + +
+ +

Build a register from a list of qubit coordinates.

+

Each node is added to the underlying +graph with the respective coordinates, but the edges are left empty.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
coords +
+

List of qubit coordinate tuples.

+
+

+ + TYPE: + list[tuple] + +

+
+ +
+ Source code in qadence/register.py +
@classmethod
+def from_coordinates(
+    cls,
+    coords: list[tuple],
+    lattice: LatticeTopology | str = LatticeTopology.ARBITRARY,
+    spacing: float | None = None,
+    device_specs: RydbergDevice = DEFAULT_DEVICE,
+) -> Register:
+    """
+    Build a register from a list of qubit coordinates.
+
+    Each node is added to the underlying
+    graph with the respective coordinates, but the edges are left empty.
+
+    Arguments:
+        coords: List of qubit coordinate tuples.
+    """
+    graph = nx.Graph()
+    for i, pos in enumerate(coords):
+        graph.add_node(i, pos=pos)
+    return cls(graph, spacing, device_specs)
+
+
+
+ +
+ +
+ + +

+ honeycomb_lattice(n_cells_row, n_cells_col, spacing=1.0, device_specs=DEFAULT_DEVICE) + + + classmethod + + +

+ + +
+ +

Build a honeycomb lattice register.

+

Each cell is an hexagon made up of six qubits.

+ + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_cells_row +
+

Number of cells in each row.

+
+

+ + TYPE: + int + +

+
n_cells_col +
+

Number of cells in each column.

+
+

+ + TYPE: + int + +

+
+ +
+ Source code in qadence/register.py +
@classmethod
+def honeycomb_lattice(
+    cls,
+    n_cells_row: int,
+    n_cells_col: int,
+    spacing: float = 1.0,
+    device_specs: RydbergDevice = DEFAULT_DEVICE,
+) -> Register:
+    """
+    Build a honeycomb lattice register.
+
+    Each cell is an hexagon made up of six qubits.
+
+    Arguments:
+        n_cells_row: Number of cells in each row.
+        n_cells_col: Number of cells in each column.
+    """
+    graph = nx.hexagonal_lattice_graph(n_cells_row, n_cells_col)
+    graph = nx.relabel_nodes(graph, {(i, j): k for k, (i, j) in enumerate(graph.nodes)})
+    return cls(graph, spacing, device_specs)
+
+
+
+ +
+ +
+ + +

+ line(n_qubits, spacing=1.0, device_specs=DEFAULT_DEVICE) + + + classmethod + + +

+ + +
+ +

Build a line register.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

Total number of qubits.

+
+

+ + TYPE: + int + +

+
+ +
+ Source code in qadence/register.py +
@classmethod
+def line(
+    cls,
+    n_qubits: int,
+    spacing: float = 1.0,
+    device_specs: RydbergDevice = DEFAULT_DEVICE,
+) -> Register:
+    """
+    Build a line register.
+
+    Arguments:
+        n_qubits: Total number of qubits.
+    """
+    return cls(line_graph(n_qubits), spacing, device_specs)
+
+
+
+ +
+ +
+ + +

+ rescale_coords(scaling) + +

+ + +
+ +

Rescale the coordinates of all qubits in the register.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
scaling +
+

Scaling value.

+
+

+ + TYPE: + float + +

+
+ +
+ Source code in qadence/register.py +
def rescale_coords(self, scaling: float) -> Register:
+    """
+    Rescale the coordinates of all qubits in the register.
+
+    Arguments:
+        scaling: Scaling value.
+    """
+    g = deepcopy(self.graph)
+    _scale_node_positions(g, min_distance=1.0, spacing=scaling)
+    return Register(g, spacing=None, device_specs=self.device_specs)
+
+
+
+ +
+ +
+ + +

+ square(qubits_side, spacing=1.0, device_specs=DEFAULT_DEVICE) + + + classmethod + + +

+ + +
+ +

Build a square register.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
qubits_side +
+

Number of qubits on one side of the square.

+
+

+ + TYPE: + int + +

+
+ +
+ Source code in qadence/register.py +
@classmethod
+def square(
+    cls,
+    qubits_side: int,
+    spacing: float = 1.0,
+    device_specs: RydbergDevice = DEFAULT_DEVICE,
+) -> Register:
+    """
+    Build a square register.
+
+    Arguments:
+        qubits_side: Number of qubits on one side of the square.
+    """
+    n_points = 4 * (qubits_side - 1)
+
+    def gen_points() -> np.ndarray:
+        rotate_left = np.array([[0.0, -1.0], [1.0, 0.0]])
+        increment = np.array([0.0, 1.0])
+
+        points = [np.array([0.0, 0.0])]
+        counter = 1
+        while len(points) < n_points:
+            points.append(points[-1] + increment)
+
+            counter = (counter + 1) % qubits_side
+            if counter == 0:
+                increment = rotate_left.dot(increment)
+                counter = 1
+        points = np.array(points)  # type: ignore[assignment]
+        points -= np.mean(points, axis=0)
+
+        return points  # type: ignore[return-value]
+
+    graph = nx.grid_2d_graph(n_points, 1, periodic=True)
+    graph = nx.relabel_nodes(graph, {(i, 0): i for i in range(n_points)})
+    values = {i: {"pos": point} for i, point in zip(graph.nodes, gen_points())}
+    nx.set_node_attributes(graph, values)
+    return cls(graph, spacing, device_specs)
+
+
+
+ +
+ +
+ + +

+ triangular_lattice(n_cells_row, n_cells_col, spacing=1.0, device_specs=DEFAULT_DEVICE) + + + classmethod + + +

+ + +
+ +

Build a triangular lattice register.

+

Each cell is a triangle made up of three qubits.

+ + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_cells_row +
+

Number of cells in each row.

+
+

+ + TYPE: + int + +

+
n_cells_col +
+

Number of cells in each column.

+
+

+ + TYPE: + int + +

+
+ +
+ Source code in qadence/register.py +
@classmethod
+def triangular_lattice(
+    cls,
+    n_cells_row: int,
+    n_cells_col: int,
+    spacing: float = 1.0,
+    device_specs: RydbergDevice = DEFAULT_DEVICE,
+) -> Register:
+    """
+    Build a triangular lattice register.
+
+    Each cell is a triangle made up of three qubits.
+
+    Arguments:
+        n_cells_row: Number of cells in each row.
+        n_cells_col: Number of cells in each column.
+    """
+    return cls(triangular_lattice_graph(n_cells_row, n_cells_col), spacing, device_specs)
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v1.7.4/api/serialization/index.html b/v1.7.4/api/serialization/index.html new file mode 100644 index 000000000..9b74a88d2 --- /dev/null +++ b/v1.7.4/api/serialization/index.html @@ -0,0 +1,3664 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Serialization - Qadence + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Serialization

+ +

Serialization

+ + +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ SerializationModel(d=dict()) + + + dataclass + + +

+ + +
+ + +

A serialization model class to serialize data from QuantumModels,.

+

torch.nn.Module and similar structures. The data included in the +serialization logic includes: the AbstractBlock and its children +classes, QuantumCircuit, Register, and sympy expressions +(including Parameter class from qadence.parameters).

+

A children class must define the value attribute type and how to +handle it, since it is the main property for the class to be used +by the serialization process. For instance:

+
@dataclass
+class QuantumCircuitSerialization(SerializationModel):
+    value: QuantumCircuit = dataclass_field(init=False)
+
+    def __post_init__(self) -> None:
+        self.value = (
+            QuantumCircuit._from_dict(self.d)
+            if isinstance(self.d, dict)
+            else self.d
+        )
+
+ + + + +
+ + + + + + + + + + + +
+ +
+ +
+ + +
+ + +

+ deserialize(d, as_torch=False) + +

+ + +
+ +

Supported Types:

+

AbstractBlock | QuantumCircuit | QuantumModel | Register | torch.nn.Module +Deserializes a dict to one of the supported types.

+ + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
d +
+

A dict containing a serialized object.

+
+

+ + TYPE: + dict + +

+
as_torch +
+

Whether to transform to torch for the deserialized object.

+
+

+ + TYPE: + bool + + + DEFAULT: + False + +

+
+

Returns: + AbstractBlock, QuantumCircuit, QuantumModel, Register, torch.nn.Module.

+

Examples: +

import torch
+from qadence import serialize, deserialize, hea, hamiltonian_factory, Z
+from qadence import QuantumCircuit, QuantumModel
+
+n_qubits = 2
+myblock = hea(n_qubits=n_qubits, depth=1)
+block_dict = serialize(myblock)
+print(block_dict)
+
+## Lets use myblock in a QuantumCircuit and serialize it.
+
+qc = QuantumCircuit(n_qubits, myblock)
+qc_dict = serialize(qc)
+qc_deserialized = deserialize(qc_dict)
+assert qc == qc_deserialized
+
+## Finally, let's wrap it in a QuantumModel
+obs = hamiltonian_factory(n_qubits, detuning = Z)
+qm = QuantumModel(qc, obs, backend='pyqtorch', diff_mode='ad')
+
+qm_dict = serialize(qm)
+qm_deserialized = deserialize(qm_dict)
+# Lets check if the loaded QuantumModel returns the same expectation
+assert torch.isclose(qm.expectation({}), qm_deserialized.expectation({}))
+
+
+ +
{'type': 'ChainBlock', 'qubit_support': (0, 1), 'tag': 'HEA', 'blocks': [{'type': 'ChainBlock', 'qubit_support': (0, 1), 'tag': None, 'blocks': [{'type': 'KronBlock', 'qubit_support': (0, 1), 'tag': None, 'blocks': [{'type': 'RX', 'qubit_support': (0,), 'tag': None, 'parameters': {'_name_dict': {'parameter': ('a5621bb2-c241-4ab6-a0d4-b6bc17551d40', {'name': 'theta_0', 'expression': "Parameter('theta_0')", 'symbols': {'theta_0': {'name': 'theta_0', 'trainable': 'True', 'value': '0.7796979347163269'}}})}}}, {'type': 'RX', 'qubit_support': (1,), 'tag': None, 'parameters': {'_name_dict': {'parameter': ('e5121dfa-7612-41b4-b403-2cc6d0179468', {'name': 'theta_1', 'expression': "Parameter('theta_1')", 'symbols': {'theta_1': {'name': 'theta_1', 'trainable': 'True', 'value': '0.46222009867108493'}}})}}}]}, {'type': 'KronBlock', 'qubit_support': (0, 1), 'tag': None, 'blocks': [{'type': 'RY', 'qubit_support': (0,), 'tag': None, 'parameters': {'_name_dict': {'parameter': ('ef90e0db-cd34-4d6a-a929-f72f19de5d21', {'name': 'theta_2', 'expression': "Parameter('theta_2')", 'symbols': {'theta_2': {'name': 'theta_2', 'trainable': 'True', 'value': '0.25089012190063364'}}})}}}, {'type': 'RY', 'qubit_support': (1,), 'tag': None, 'parameters': {'_name_dict': {'parameter': ('9ba0db59-9e27-4832-8639-6a713f527003', {'name': 'theta_3', 'expression': "Parameter('theta_3')", 'symbols': {'theta_3': {'name': 'theta_3', 'trainable': 'True', 'value': '0.2224366885096679'}}})}}}]}, {'type': 'KronBlock', 'qubit_support': (0, 1), 'tag': None, 'blocks': [{'type': 'RX', 'qubit_support': (0,), 'tag': None, 'parameters': {'_name_dict': {'parameter': ('4542f259-721a-43c0-b8aa-bcb3410aac4e', {'name': 'theta_4', 'expression': "Parameter('theta_4')", 'symbols': {'theta_4': {'name': 'theta_4', 'trainable': 'True', 'value': '0.0922895491272856'}}})}}}, {'type': 'RX', 'qubit_support': (1,), 'tag': None, 'parameters': {'_name_dict': {'parameter': ('76ba6506-ba78-4613-9a47-867b1df6a1c2', {'name': 'theta_5', 
'expression': "Parameter('theta_5')", 'symbols': {'theta_5': {'name': 'theta_5', 'trainable': 'True', 'value': '0.9348686801767666'}}})}}}]}]}, {'type': 'ChainBlock', 'qubit_support': (0, 1), 'tag': None, 'blocks': [{'type': 'KronBlock', 'qubit_support': (0, 1), 'tag': None, 'blocks': [{'type': 'CNOT', 'qubit_support': (0, 1), 'tag': None, 'blocks': [{'type': 'X', 'qubit_support': (1,), 'tag': None}]}]}]}]}
+
+ +

+ +
+ Source code in qadence/serialization.py +
def deserialize(d: dict, as_torch: bool = False) -> SUPPORTED_TYPES:
+    """
+    Supported Types:
+
+    AbstractBlock | QuantumCircuit | QuantumModel | Register | torch.nn.Module
+    Deserializes a dict to one of the supported types.
+
+    Arguments:
+        d (dict): A dict containing a serialized object.
+        as_torch (bool): Whether to transform to torch for the deserialized object.
+    Returns:
+        AbstractBlock, QuantumCircuit, QuantumModel, Register, torch.nn.Module.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    import torch
+    from qadence import serialize, deserialize, hea, hamiltonian_factory, Z
+    from qadence import QuantumCircuit, QuantumModel
+
+    n_qubits = 2
+    myblock = hea(n_qubits=n_qubits, depth=1)
+    block_dict = serialize(myblock)
+    print(block_dict)
+
+    ## Lets use myblock in a QuantumCircuit and serialize it.
+
+    qc = QuantumCircuit(n_qubits, myblock)
+    qc_dict = serialize(qc)
+    qc_deserialized = deserialize(qc_dict)
+    assert qc == qc_deserialized
+
+    ## Finally, let's wrap it in a QuantumModel
+    obs = hamiltonian_factory(n_qubits, detuning = Z)
+    qm = QuantumModel(qc, obs, backend='pyqtorch', diff_mode='ad')
+
+    qm_dict = serialize(qm)
+    qm_deserialized = deserialize(qm_dict)
+    # Lets check if the loaded QuantumModel returns the same expectation
+    assert torch.isclose(qm.expectation({}), qm_deserialized.expectation({}))
+    ```
+    """
+    obj: SerializationModel
+    if d.get("expression"):
+        obj = ExpressionSerialization(d)
+    elif d.get("block") and d.get("register"):
+        obj = QuantumCircuitSerialization(d)
+    elif d.get("graph"):
+        obj = RegisterSerialization(d)
+    elif d.get("type"):
+        obj = BlockTypeSerialization(d)
+    else:
+        obj = ModelSerialization(d, as_torch=as_torch)
+    return obj.value
+
+
+
+ +
+ +
+ + +

+ load(file_path, map_location='cpu') + +

+ + +
+ +

Same as serialize/deserialize but for storing/loading files.

+

Supported types: AbstractBlock | QuantumCircuit | QuantumModel | Register +Loads a .json or .pt file to one of the supported types.

+ + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
file_path +
+

The name of the file.

+
+

+ + TYPE: + str + +

+
map_location +
+

In case of a .pt file, on which device to load the object (cpu,cuda).

+
+

+ + TYPE: + str + + + DEFAULT: + 'cpu' + +

+
+

Returns: + A object of type AbstractBlock, QuantumCircuit, QuantumModel, Register.

+

Examples: +

import torch
+from pathlib import Path
+import os
+
+from qadence import save, load, hea, hamiltonian_factory, Z
+from qadence import QuantumCircuit, QuantumModel
+
+n_qubits = 2
+myblock = hea(n_qubits=n_qubits, depth=1)
+qc = QuantumCircuit(n_qubits, myblock)
+# Lets store the circuit in a json file
+save(qc, '.', 'circ')
+loaded_qc = load(Path('circ.json'))
+qc == loaded_qc
+os.remove('circ.json')
+## Let's wrap it in a QuantumModel and store that
+obs = hamiltonian_factory(n_qubits, detuning = Z)
+qm = QuantumModel(qc, obs, backend='pyqtorch', diff_mode='ad')
+save(qm, folder= '.',file_name= 'quantum_model')
+qm_loaded = load('quantum_model.json')
+os.remove('quantum_model.json')
+
+
+ +

+
+ +

+ +
+ Source code in qadence/serialization.py +
def load(file_path: str | Path, map_location: str = "cpu") -> SUPPORTED_TYPES:
+    """
+    Same as serialize/deserialize but for storing/loading files.
+
+    Supported types: AbstractBlock | QuantumCircuit | QuantumModel | Register
+    Loads a .json or .pt file to one of the supported types.
+
+    Arguments:
+        file_path (str): The name of the file.
+        map_location (str): In case of a .pt file, on which device to load the object (cpu,cuda).
+    Returns:
+        A object of type AbstractBlock, QuantumCircuit, QuantumModel, Register.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    import torch
+    from pathlib import Path
+    import os
+
+    from qadence import save, load, hea, hamiltonian_factory, Z
+    from qadence import QuantumCircuit, QuantumModel
+
+    n_qubits = 2
+    myblock = hea(n_qubits=n_qubits, depth=1)
+    qc = QuantumCircuit(n_qubits, myblock)
+    # Lets store the circuit in a json file
+    save(qc, '.', 'circ')
+    loaded_qc = load(Path('circ.json'))
+    qc == loaded_qc
+    os.remove('circ.json')
+    ## Let's wrap it in a QuantumModel and store that
+    obs = hamiltonian_factory(n_qubits, detuning = Z)
+    qm = QuantumModel(qc, obs, backend='pyqtorch', diff_mode='ad')
+    save(qm, folder= '.',file_name= 'quantum_model')
+    qm_loaded = load('quantum_model.json')
+    os.remove('quantum_model.json')
+    ```
+    """
+    d = {}
+    if isinstance(file_path, str):
+        file_path = Path(file_path)
+    if not os.path.exists(file_path):
+        logger.error(f"File {file_path} not found.")
+        raise FileNotFoundError
+    FORMAT = file_extension(file_path)
+    _, _, load_fn, _ = FORMAT_DICT[FORMAT]  # type: ignore[index]
+    try:
+        d = load_fn(file_path, map_location)
+        logger.debug(f"Successfully loaded {d} from {file_path}.")
+    except Exception as e:
+        logger.error(f"Unable to load Object from {file_path} due to {e}")
+    return deserialize(d)
+
+
+
+ +
+ +
+ + +

+ parse_expr_fn(code) + +

+ + +
+ +

A parsing expressions function that checks whether a given code is valid on.

+

the parsing grammar. The grammar is defined to be compatible with sympy +expressions, such as Float('-0.33261030434342942', precision=53), while +avoiding code injection such as 2*3 or __import__('os').system('ls -la').

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
code +
+

code to be parsed and checked.

+
+

+ + TYPE: + str + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + bool + + +
+

Boolean indicating whether the code matches the defined grammar or not.

+
+
+ +
+ Source code in qadence/serialization.py +
def parse_expr_fn(code: str) -> bool:
+    """
+    A parsing expressions function that checks whether a given code is valid on.
+
+    the parsing grammar. The grammar is defined to be compatible with `sympy`
+    expressions, such as `Float('-0.33261030434342942', precision=53)`, while
+    avoiding code injection such as `2*3` or `__import__('os').system('ls -la')`.
+
+    Args:
+        code (str): code to be parsed and checked.
+
+    Returns:
+        Boolean indicating whether the code matches the defined grammar or not.
+    """
+
+    parser = _parsing_serialize_expr
+    try:
+        parser.parse(code)
+    except NoMatch:
+        return False
+    else:
+        return True
+
+
+
+ +
+ +
+ + +

+ save(obj, folder, file_name='', format=SerializationFormat.JSON) + +

+ + +
+ +

Same as serialize/deserialize but for storing/loading files.

+

Supported types: +AbstractBlock | QuantumCircuit | QuantumModel | Register | torch.nn.Module +Saves a qadence object to a json/.pt.

+ + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
obj +
+
Either AbstractBlock, QuantumCircuit, QuantumModel, Register.
+
+
+

+ + TYPE: + AbstractBlock | QuantumCircuit | QuantumModel | Register + +

+
file_name +
+

The name of the file.

+
+

+ + TYPE: + str + + + DEFAULT: + '' + +

+
format +
+

The type of file to save.

+
+

+ + TYPE: + str + + + DEFAULT: + JSON + +

+
+

Returns: + None.

+

Examples: +

import torch
+from pathlib import Path
+import os
+
+from qadence import save, load, hea, hamiltonian_factory, Z
+from qadence import QuantumCircuit, QuantumModel
+
+n_qubits = 2
+myblock = hea(n_qubits=n_qubits, depth=1)
+qc = QuantumCircuit(n_qubits, myblock)
+# Lets store the circuit in a json file
+save(qc, '.', 'circ')
+loaded_qc = load(Path('circ.json'))
+qc == loaded_qc
+os.remove('circ.json')
+## Let's wrap it in a QuantumModel and store that
+obs = hamiltonian_factory(n_qubits, detuning = Z)
+qm = QuantumModel(qc, obs, backend='pyqtorch', diff_mode='ad')
+save(qm, folder= '.',file_name= 'quantum_model')
+qm_loaded = load('quantum_model.json')
+os.remove('quantum_model.json')
+
+
+ +

+
+ +

+ +
+ Source code in qadence/serialization.py +
def save(
+    obj: SUPPORTED_TYPES,
+    folder: str | Path,
+    file_name: str = "",
+    format: SerializationFormat = SerializationFormat.JSON,
+) -> None:
+    """
+    Same as serialize/deserialize but for storing/loading files.
+
+    Supported types:
+    AbstractBlock | QuantumCircuit | QuantumModel | Register | torch.nn.Module
+    Saves a qadence object to a json/.pt.
+
+    Arguments:
+        obj (AbstractBlock | QuantumCircuit | QuantumModel | Register):
+                Either AbstractBlock, QuantumCircuit, QuantumModel, Register.
+        file_name (str): The name of the file.
+        format (str): The type of file to save.
+    Returns:
+        None.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    import torch
+    from pathlib import Path
+    import os
+
+    from qadence import save, load, hea, hamiltonian_factory, Z
+    from qadence import QuantumCircuit, QuantumModel
+
+    n_qubits = 2
+    myblock = hea(n_qubits=n_qubits, depth=1)
+    qc = QuantumCircuit(n_qubits, myblock)
+    # Lets store the circuit in a json file
+    save(qc, '.', 'circ')
+    loaded_qc = load(Path('circ.json'))
+    qc == loaded_qc
+    os.remove('circ.json')
+    ## Let's wrap it in a QuantumModel and store that
+    obs = hamiltonian_factory(n_qubits, detuning = Z)
+    qm = QuantumModel(qc, obs, backend='pyqtorch', diff_mode='ad')
+    save(qm, folder= '.',file_name= 'quantum_model')
+    qm_loaded = load('quantum_model.json')
+    os.remove('quantum_model.json')
+    ```
+    """
+    if not isinstance(obj, get_args(SUPPORTED_TYPES)):
+        logger.error(f"Serialization of object type {type(obj)} not supported.")
+    folder = Path(folder)
+    if not folder.is_dir():
+        logger.error(NotADirectoryError)
+    if file_name == "":
+        file_name = type(obj).__name__
+    try:
+        suffix, save_fn, _, save_params = FORMAT_DICT[format]
+        d = serialize(obj, save_params)
+        file_path = folder / Path(file_name + suffix)
+        save_fn(d, file_path)
+        logger.debug(f"Successfully saved {obj} from to {folder}.")
+    except Exception as e:
+        logger.error(f"Unable to write {type(obj)} to disk due to {e}")
+
+
+
+ +
+ +
+ + +

+ serialize(obj, save_params=False) + +

+ + +
+ +

Supported Types:

+

AbstractBlock | QuantumCircuit | QuantumModel | torch.nn.Module | Register | Module +Serializes a qadence object to a dictionary.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
obj +
+ +
+

+ + TYPE: + AbstractBlock | QuantumCircuit | QuantumModel | Register | Module + +

+
+

Returns: + A dict.

+

Examples: +

import torch
+from qadence import serialize, deserialize, hea, hamiltonian_factory, Z
+from qadence import QuantumCircuit, QuantumModel
+
+n_qubits = 2
+myblock = hea(n_qubits=n_qubits, depth=1)
+block_dict = serialize(myblock)
+print(block_dict)
+
+## Lets use myblock in a QuantumCircuit and serialize it.
+
+qc = QuantumCircuit(n_qubits, myblock)
+qc_dict = serialize(qc)
+qc_deserialized = deserialize(qc_dict)
+assert qc == qc_deserialized
+
+## Finally, let's wrap it in a QuantumModel
+obs = hamiltonian_factory(n_qubits, detuning = Z)
+qm = QuantumModel(qc, obs, backend='pyqtorch', diff_mode='ad')
+
+qm_dict = serialize(qm)
+qm_deserialized = deserialize(qm_dict)
+# Lets check if the loaded QuantumModel returns the same expectation
+assert torch.isclose(qm.expectation({}), qm_deserialized.expectation({}))
+
+
+ +
{'type': 'ChainBlock', 'qubit_support': (0, 1), 'tag': 'HEA', 'blocks': [{'type': 'ChainBlock', 'qubit_support': (0, 1), 'tag': None, 'blocks': [{'type': 'KronBlock', 'qubit_support': (0, 1), 'tag': None, 'blocks': [{'type': 'RX', 'qubit_support': (0,), 'tag': None, 'parameters': {'_name_dict': {'parameter': ('e860fce4-4bd9-4b34-94ae-ccc1a53f3295', {'name': 'theta_0', 'expression': "Parameter('theta_0')", 'symbols': {'theta_0': {'name': 'theta_0', 'trainable': 'True', 'value': '0.9833768713364459'}}})}}}, {'type': 'RX', 'qubit_support': (1,), 'tag': None, 'parameters': {'_name_dict': {'parameter': ('55bba06f-c3b4-47ca-8067-c16a1d30fcd7', {'name': 'theta_1', 'expression': "Parameter('theta_1')", 'symbols': {'theta_1': {'name': 'theta_1', 'trainable': 'True', 'value': '0.9817422122943503'}}})}}}]}, {'type': 'KronBlock', 'qubit_support': (0, 1), 'tag': None, 'blocks': [{'type': 'RY', 'qubit_support': (0,), 'tag': None, 'parameters': {'_name_dict': {'parameter': ('e6a512ee-9d3f-4898-9b81-6d7a147d934e', {'name': 'theta_2', 'expression': "Parameter('theta_2')", 'symbols': {'theta_2': {'name': 'theta_2', 'trainable': 'True', 'value': '0.802746647746981'}}})}}}, {'type': 'RY', 'qubit_support': (1,), 'tag': None, 'parameters': {'_name_dict': {'parameter': ('340c3723-de35-4472-889f-d456be86f189', {'name': 'theta_3', 'expression': "Parameter('theta_3')", 'symbols': {'theta_3': {'name': 'theta_3', 'trainable': 'True', 'value': '0.9612813590640975'}}})}}}]}, {'type': 'KronBlock', 'qubit_support': (0, 1), 'tag': None, 'blocks': [{'type': 'RX', 'qubit_support': (0,), 'tag': None, 'parameters': {'_name_dict': {'parameter': ('fd0ec3aa-fc38-4835-a7fb-9f1fe1634ed3', {'name': 'theta_4', 'expression': "Parameter('theta_4')", 'symbols': {'theta_4': {'name': 'theta_4', 'trainable': 'True', 'value': '0.18959335414634937'}}})}}}, {'type': 'RX', 'qubit_support': (1,), 'tag': None, 'parameters': {'_name_dict': {'parameter': ('2928b515-ba93-40ad-8bdb-3f7211399ff0', {'name': 'theta_5', 
'expression': "Parameter('theta_5')", 'symbols': {'theta_5': {'name': 'theta_5', 'trainable': 'True', 'value': '0.652348575182211'}}})}}}]}]}, {'type': 'ChainBlock', 'qubit_support': (0, 1), 'tag': None, 'blocks': [{'type': 'KronBlock', 'qubit_support': (0, 1), 'tag': None, 'blocks': [{'type': 'CNOT', 'qubit_support': (0, 1), 'tag': None, 'blocks': [{'type': 'X', 'qubit_support': (1,), 'tag': None}]}]}]}]}
+
+ +

+ +
+ Source code in qadence/serialization.py +
def serialize(obj: SUPPORTED_TYPES, save_params: bool = False) -> dict:
+    """
+    Supported Types:
+
+    AbstractBlock | QuantumCircuit | QuantumModel | torch.nn.Module | Register | Module
+    Serializes a qadence object to a dictionary.
+
+    Arguments:
+        obj (AbstractBlock | QuantumCircuit | QuantumModel | Register | torch.nn.Module):
+    Returns:
+        A dict.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    import torch
+    from qadence import serialize, deserialize, hea, hamiltonian_factory, Z
+    from qadence import QuantumCircuit, QuantumModel
+
+    n_qubits = 2
+    myblock = hea(n_qubits=n_qubits, depth=1)
+    block_dict = serialize(myblock)
+    print(block_dict)
+
+    ## Lets use myblock in a QuantumCircuit and serialize it.
+
+    qc = QuantumCircuit(n_qubits, myblock)
+    qc_dict = serialize(qc)
+    qc_deserialized = deserialize(qc_dict)
+    assert qc == qc_deserialized
+
+    ## Finally, let's wrap it in a QuantumModel
+    obs = hamiltonian_factory(n_qubits, detuning = Z)
+    qm = QuantumModel(qc, obs, backend='pyqtorch', diff_mode='ad')
+
+    qm_dict = serialize(qm)
+    qm_deserialized = deserialize(qm_dict)
+    # Lets check if the loaded QuantumModel returns the same expectation
+    assert torch.isclose(qm.expectation({}), qm_deserialized.expectation({}))
+    ```
+    """
+    if not isinstance(obj, get_args(SUPPORTED_TYPES)):
+        logger.error(TypeError(f"Serialization of object type {type(obj)} not supported."))
+
+    d: dict = dict()
+    try:
+        if isinstance(obj, core.Expr):
+            symb_dict = dict()
+            expr_dict = {"name": str(obj), "expression": srepr(obj)}
+            symbs: set[Parameter | core.Basic] = obj.free_symbols
+            if symbs:
+                symb_dict = {"symbols": {str(s): s._to_dict() for s in symbs}}
+            d = {**expr_dict, **symb_dict}
+        else:
+            if hasattr(obj, "_to_dict"):
+                model_to_dict: Callable = obj._to_dict
+                d = (
+                    model_to_dict(save_params)
+                    if isinstance(obj, torch.nn.Module)
+                    else model_to_dict()
+                )
+            elif hasattr(obj, "state_dict"):
+                d = {type(obj).__name__: obj.state_dict()}
+            else:
+                raise ValueError(f"Cannot serialize object {obj}.")
+    except Exception as e:
+        logger.error(f"Serialization of object {obj} failed due to {e}")
+    return d
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v1.7.4/api/states/index.html b/v1.7.4/api/states/index.html new file mode 100644 index 000000000..b3f7d8f5a --- /dev/null +++ b/v1.7.4/api/states/index.html @@ -0,0 +1,5194 @@ + + + + + + + + + + + + + + + + + + + + + + + + + State preparation - Qadence + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

State preparation

+ +

State Preparation Routines

+ + +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ ghz_block(n_qubits) + +

+ + +
+ +

Generates the abstract ghz state for a specified number of qubits.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

The number of qubits.

+
+

+ + TYPE: + int + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + ChainBlock + + +
+

A ChainBlock representing the GHZ state.

+
+
+

Examples: +

from qadence.states import ghz_block
+
+block = ghz_block(n_qubits=2)
+print(block)
+
+
+ +
ChainBlock(0,1)
+├── H(0)
+└── ChainBlock(0,1)
+    └── CNOT(0, 1)
+
+ +

+ +
+ Source code in qadence/states.py +
def ghz_block(n_qubits: int) -> ChainBlock:
+    """
+    Generates the abstract ghz state for a specified number of qubits.
+
+    Arguments:
+        n_qubits (int): The number of qubits.
+
+    Returns:
+        A ChainBlock representing the GHZ state.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.states import ghz_block
+
+    block = ghz_block(n_qubits=2)
+    print(block)
+    ```
+    """
+    cnots = chain(CNOT(i - 1, i) for i in range(1, n_qubits))
+    return chain(H(0), cnots)
+
+
+
+ +
+ +
+ + +

+ ghz_state(n_qubits, batch_size=1) + +

+ + +
+ +

Creates a GHZ state.

+ + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

The number of qubits.

+
+

+ + TYPE: + int + +

+
batch_size +
+

How many bitstrings to use.

+
+

+ + TYPE: + int + + + DEFAULT: + 1 + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + Tensor + + +
+

A torch.Tensor.

+
+
+

Examples: +

from qadence.states import ghz_state
+
+print(ghz_state(n_qubits=2, batch_size=2))
+
+
+ +
tensor([[0.7071+0.j, 0.0000+0.j, 0.0000+0.j, 0.7071+0.j],
+        [0.7071+0.j, 0.0000+0.j, 0.0000+0.j, 0.7071+0.j]])
+
+ +

+ +
+ Source code in qadence/states.py +
def ghz_state(n_qubits: int, batch_size: int = 1) -> Tensor:
+    """
+    Creates a GHZ state.
+
+    Arguments:
+        n_qubits (int): The number of qubits.
+        batch_size (int): How many bitstrings to use.
+
+    Returns:
+        A torch.Tensor.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.states import ghz_state
+
+    print(ghz_state(n_qubits=2, batch_size=2))
+    ```
+    """
+    norm = 1 / torch.sqrt(torch.tensor(2))
+    return norm * (zero_state(n_qubits, batch_size) + one_state(n_qubits, batch_size))
+
+
+
+ +
+ +
+ + +

+ is_normalized(wf, atol=NORMALIZATION_ATOL) + +

+ + +
+ +

Checks if a wave function is normalized.

+ + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
wf +
+

The wave function as a torch tensor.

+
+

+ + TYPE: + Tensor + +

+
atol +
+

The tolerance.

+
+

+ + TYPE: + float) + + + DEFAULT: + NORMALIZATION_ATOL + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + bool + + +
+

A bool.

+
+
+

Examples: +

from qadence.states import uniform_state, is_normalized
+
+print(is_normalized(uniform_state(2)))
+
+
+ +
True
+
+ +

+ +
+ Source code in qadence/states.py +
def is_normalized(wf: Tensor, atol: float = NORMALIZATION_ATOL) -> bool:
+    """
+    Checks if a wave function is normalized.
+
+    Arguments:
+        wf (torch.Tensor): The wave function as a torch tensor.
+        atol (float) : The tolerance.
+
+    Returns:
+        A bool.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.states import uniform_state, is_normalized
+
+    print(is_normalized(uniform_state(2)))
+    ```
+    """
+    if wf.dim() == 1:
+        wf = wf.unsqueeze(0)
+    sum_probs: Tensor = (wf.abs() ** 2).sum(dim=1)
+    ones = torch.ones_like(sum_probs)
+    return torch.allclose(sum_probs, ones, rtol=0.0, atol=atol)  # type: ignore[no-any-return]
+
+
+
+ +
+ +
+ + +

+ normalize(wf) + +

+ + +
+ +

Normalizes a wavefunction or batch of wave functions.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
wf +
+

Normalized wavefunctions.

+
+

+ + TYPE: + Tensor + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + Tensor + + +
+

A torch.Tensor.

+
+
+

Examples: +

from qadence.states import uniform_state, normalize
+
+print(normalize(uniform_state(2, 2)))
+
+
+ +
tensor([[0.5000+0.j, 0.5000+0.j, 0.5000+0.j, 0.5000+0.j],
+        [0.5000+0.j, 0.5000+0.j, 0.5000+0.j, 0.5000+0.j]])
+
+ +

+ +
+ Source code in qadence/states.py +
def normalize(wf: Tensor) -> Tensor:
+    """
+    Normalizes a wavefunction or batch of wave functions.
+
+    Arguments:
+        wf (torch.Tensor): Normalized wavefunctions.
+
+    Returns:
+        A torch.Tensor.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.states import uniform_state, normalize
+
+    print(normalize(uniform_state(2, 2)))
+    ```
+    """
+    if wf.dim() == 1:
+        return wf / torch.sqrt((wf.abs() ** 2).sum())
+    else:
+        return wf / torch.sqrt((wf.abs() ** 2).sum(1)).unsqueeze(1)
+
+
+
+ +
+ +
+ + +

+ one_block(n_qubits) + +

+ + +
+ +

Generates the abstract one state for a specified number of qubits.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

The number of qubits.

+
+

+ + TYPE: + int + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + KronBlock + + +
+

A KronBlock representing the one state.

+
+
+

Examples: +

from qadence.states import one_block
+
+block = one_block(n_qubits=2)
+print(block)
+
+
+ +
KronBlock(0,1)
+├── X(0)
+└── X(1)
+
+ +

+ +
+ Source code in qadence/states.py +
def one_block(n_qubits: int) -> KronBlock:
+    """
+    Generates the abstract one state for a specified number of qubits.
+
+    Arguments:
+        n_qubits (int): The number of qubits.
+
+    Returns:
+        A KronBlock representing the one state.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.states import one_block
+
+    block = one_block(n_qubits=2)
+    print(block)
+    ```
+    """
+    return _from_op(X, n_qubits=n_qubits)
+
+
+
+ +
+ +
+ + +

+ one_state(n_qubits, batch_size=1) + +

+ + +
+ +

Generates the one state for a specified number of qubits.

+ + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

The number of qubits.

+
+

+ + TYPE: + int + +

+
batch_size +
+

The batch size.

+
+

+ + TYPE: + int + + + DEFAULT: + 1 + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + Tensor + + +
+

A torch.Tensor.

+
+
+

Examples: +

from qadence.states import one_state
+
+state = one_state(n_qubits=2)
+print(state)
+
+
+ +
tensor([[0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
+
+ +

+ +
+ Source code in qadence/states.py +
def one_state(n_qubits: int, batch_size: int = 1) -> Tensor:
+    """
+    Generates the one state for a specified number of qubits.
+
+    Arguments:
+        n_qubits (int): The number of qubits.
+        batch_size (int): The batch size.
+
+    Returns:
+        A torch.Tensor.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.states import one_state
+
+    state = one_state(n_qubits=2)
+    print(state)
+    ```
+    """
+    bitstring = "1" * n_qubits
+    return _state_from_bitstring(bitstring, batch_size)
+
+
+
+ +
+ +
+ + +

+ pmf(wf) + +

+ + +
+ +

Converts a wave function into a torch Distribution.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
wf +
+

The wave function as a torch tensor.

+
+

+ + TYPE: + Tensor + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + Distribution + + +
+

A torch.distributions.Distribution.

+
+
+

Examples: +

from qadence.states import uniform_state, pmf
+
+print(pmf(uniform_state(2)).probs)
+
+
+ +
tensor([[0.2500, 0.2500, 0.2500, 0.2500]])
+
+ +

+ +
+ Source code in qadence/states.py +
def pmf(wf: Tensor) -> Distribution:
+    """
+    Converts a wave function into a torch Distribution.
+
+    Arguments:
+        wf (torch.Tensor): The wave function as a torch tensor.
+
+    Returns:
+        A torch.distributions.Distribution.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.states import uniform_state, pmf
+
+    print(pmf(uniform_state(2)).probs)
+    ```
+    """
+    return Categorical(torch.abs(torch.pow(wf, 2)))
+
+
+
+ +
+ +
+ + +

+ product_block(bitstring) + +

+ + +
+ +

Creates an abstract product state from a bitstring.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
bitstring +
+

A bitstring.

+
+

+ + TYPE: + str + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + KronBlock + + +
+

A KronBlock representing the product state.

+
+
+

Examples: +

from qadence.states import product_block
+
+print(product_block("1100"))
+
+
+ +
KronBlock(0,1,2,3)
+├── X(0)
+├── X(1)
+├── I(2)
+└── I(3)
+
+ +

+ +
+ Source code in qadence/states.py +
def product_block(bitstring: str) -> KronBlock:
+    """
+    Creates an abstract product state from a bitstring.
+
+    Arguments:
+        bitstring (str): A bitstring.
+
+    Returns:
+        A KronBlock representing the product state.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.states import product_block
+
+    print(product_block("1100"))
+    ```
+    """
+    return _block_from_bitstring(bitstring)
+
+
+
+ +
+ +
+ + +

+ product_state(bitstring, batch_size=1, endianness=Endianness.BIG, backend=BackendName.PYQTORCH) + +

+ + +
+ +

Creates a product state from a bitstring.

+ + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
bitstring +
+

A bitstring.

+
+

+ + TYPE: + str + +

+
batch_size +
+

Batch size.

+
+

+ + TYPE: + int) + + + DEFAULT: + 1 + +

+
backend +
+

The backend to use. Default is "pyqtorch".

+
+

+ + TYPE: + BackendName + + + DEFAULT: + PYQTORCH + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + ArrayLike + + +
+

A torch.Tensor.

+
+
+

Examples: +

from qadence.states import product_state
+
+print(product_state("1100", backend="pyqtorch"))
+print(product_state("1100", backend="horqrux"))
+
+
+ +
tensor([[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
+         1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]])
+[0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j
+ 0.+0.j 0.+0.j 1.+0.j 0.+0.j 0.+0.j 0.+0.j]
+
+ +

+ +
+ Source code in qadence/states.py +
@singledispatch
+def product_state(
+    bitstring: str,
+    batch_size: int = 1,
+    endianness: Endianness = Endianness.BIG,
+    backend: BackendName = BackendName.PYQTORCH,
+) -> ArrayLike:
+    """
+    Creates a product state from a bitstring.
+
+    Arguments:
+        bitstring (str): A bitstring.
+        batch_size (int) : Batch size.
+        backend (BackendName): The backend to use. Default is "pyqtorch".
+
+    Returns:
+        A torch.Tensor.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.states import product_state
+
+    print(product_state("1100", backend="pyqtorch"))
+    print(product_state("1100", backend="horqrux"))
+    ```
+    """
+    if batch_size:
+        logger.debug(
+            "The input `batch_size` is going to be deprecated. "
+            "For now, default batch_size is set to 1."
+        )
+    return run(product_block(bitstring), backend=backend, endianness=endianness)
+
+
+
+ +
+ +
+ + +

+ rand_bitstring(N) + +

+ + +
+ +

Creates a random bistring.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
N +
+

The length of the bitstring.

+
+

+ + TYPE: + int + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + str + + +
+

A string.

+
+
+

Examples: +

from qadence.states import rand_bitstring
+
+print(rand_bitstring(N=8))
+
+
+ +
10000101
+
+ +

+ +
+ Source code in qadence/states.py +
def rand_bitstring(N: int) -> str:
+    """
+    Creates a random bitstring.
+
+    Arguments:
+        N (int): The length of the bitstring.
+
+    Returns:
+        A string.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.states import rand_bitstring
+
+    print(rand_bitstring(N=8))
+    ```
+    """
+    return "".join(str(random.randint(0, 1)) for _ in range(N))
+
+
+
+ +
+ +
+ + +

+ rand_product_block(n_qubits) + +

+ + +
+ +

Creates a block representing a random abstract product state.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

The number of qubits.

+
+

+ + TYPE: + int + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + KronBlock + + +
+

A KronBlock representing the product state.

+
+
+

Examples: +

from qadence.states import rand_product_block
+
+print(rand_product_block(n_qubits=2))
+
+
+ +
KronBlock(0,1)
+├── X(0)
+└── X(1)
+
+ +

+ +
+ Source code in qadence/states.py +
def rand_product_block(n_qubits: int) -> KronBlock:
+    """
+    Creates a block representing a random abstract product state.
+
+    Arguments:
+        n_qubits (int): The number of qubits.
+
+    Returns:
+        A KronBlock representing the product state.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.states import rand_product_block
+
+    print(rand_product_block(n_qubits=2))
+    ```
+    """
+    return product_block(rand_bitstring(n_qubits))
+
+
+
+ +
+ +
+ + +

+ rand_product_state(n_qubits, batch_size=1) + +

+ + +
+ +

Creates a random product state.

+ + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

The number of qubits.

+
+

+ + TYPE: + int + +

+
batch_size +
+

How many bitstrings to use.

+
+

+ + TYPE: + int + + + DEFAULT: + 1 + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + Tensor + + +
+

A torch.Tensor.

+
+
+

Examples: +

from qadence.states import rand_product_state
+
+print(rand_product_state(n_qubits=2, batch_size=2))
+
+
+ +
tensor([[0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
+        [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j]])
+
+ +

+ +
+ Source code in qadence/states.py +
def rand_product_state(n_qubits: int, batch_size: int = 1) -> Tensor:
+    """
+    Creates a random product state.
+
+    Arguments:
+        n_qubits (int): The number of qubits.
+        batch_size (int): How many bitstrings to use.
+
+    Returns:
+        A torch.Tensor.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.states import rand_product_state
+
+    print(rand_product_state(n_qubits=2, batch_size=2))
+    ```
+    """
+    wf_batch = torch.zeros(batch_size, 2**n_qubits, dtype=DTYPE)
+    rand_pos = torch.randint(0, 2**n_qubits, (batch_size,))
+    wf_batch[torch.arange(batch_size), rand_pos] = torch.tensor(1.0 + 0j, dtype=DTYPE)
+    return wf_batch
+
+
+
+ +
+ +
+ + +

+ random_state(n_qubits, batch_size=1, backend=BackendName.PYQTORCH, type=StateGeneratorType.HAAR_MEASURE_FAST) + +

+ + +
+ +

Generates a random state for a specified number of qubits.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

The number of qubits.

+
+

+ + TYPE: + int + +

+
backend +
+

The backend to use.

+
+

+ + TYPE: + str + + + DEFAULT: + PYQTORCH + +

+
batch_size +
+

The batch size.

+
+

+ + TYPE: + int + + + DEFAULT: + 1 + +

+
type +
+

StateGeneratorType.

+
+

+ + DEFAULT: + HAAR_MEASURE_FAST + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + Tensor + + +
+

A torch.Tensor.

+
+
+

Examples: +

from qadence.states import random_state, StateGeneratorType
+from qadence.states import random_state, is_normalized, pmf
+from qadence.types import BackendName
+from torch.distributions import Distribution
+
+### We have the following options:
+print([g.value for g in StateGeneratorType])
+
+n_qubits = 2
+# The default is StateGeneratorType.HAAR_MEASURE_FAST
+state = random_state(n_qubits=n_qubits)
+print(state)
+
+### Let's initialize a state using random rotations, i.e., StateGeneratorType.RANDOM_ROTATIONS.
+random = random_state(n_qubits=n_qubits, type=StateGeneratorType.RANDOM_ROTATIONS)
+print(random)
+
+
+ +
['RandomRotations', 'HaarMeasureFast', 'HaarMeasureSlow']
+tensor([[-0.0849-0.0271j,  0.0269-0.1636j, -0.2845-0.1571j,  0.7953-0.4759j]])
+tensor([[0.7884+0.0000j, 0.0973+0.0000j, 0.0000-0.6028j, 0.0000-0.0744j]])
+
+ +

+ +
+ Source code in qadence/states.py +
def random_state(
+    n_qubits: int,
+    batch_size: int = 1,
+    backend: str = BackendName.PYQTORCH,
+    type: StateGeneratorType = StateGeneratorType.HAAR_MEASURE_FAST,
+) -> Tensor:
+    """
+    Generates a random state for a specified number of qubits.
+
+    Arguments:
+        n_qubits (int): The number of qubits.
+        backend (str): The backend to use.
+        batch_size (int): The batch size.
+        type : StateGeneratorType.
+
+    Returns:
+        A torch.Tensor.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.states import random_state, StateGeneratorType
+    from qadence.states import random_state, is_normalized, pmf
+    from qadence.types import BackendName
+    from torch.distributions import Distribution
+
+    ### We have the following options:
+    print([g.value for g in StateGeneratorType])
+
+    n_qubits = 2
+    # The default is StateGeneratorType.HAAR_MEASURE_FAST
+    state = random_state(n_qubits=n_qubits)
+    print(state)
+
+    ### Let's initialize a state using random rotations, i.e., StateGeneratorType.RANDOM_ROTATIONS.
+    random = random_state(n_qubits=n_qubits, type=StateGeneratorType.RANDOM_ROTATIONS)
+    print(random)
+    ```
+    """
+
+    if type == StateGeneratorType.HAAR_MEASURE_FAST:
+        state = concat(tuple(_rand_haar_fast(n_qubits) for _ in range(batch_size)), dim=0)
+    elif type == StateGeneratorType.HAAR_MEASURE_SLOW:
+        state = concat(tuple(_rand_haar_slow(n_qubits) for _ in range(batch_size)), dim=0)
+    elif type == StateGeneratorType.RANDOM_ROTATIONS:
+        state = run(_abstract_random_state(n_qubits, batch_size))  # type: ignore
+    assert all(list(map(is_normalized, state)))
+    return state
+
+
+
+ +
+ +
+ + +

+ uniform_block(n_qubits) + +

+ + +
+ +

Generates the abstract uniform state for a specified number of qubits.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

The number of qubits.

+
+

+ + TYPE: + int + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + KronBlock + + +
+

A KronBlock representing the uniform state.

+
+
+

Examples: +

from qadence.states import uniform_block
+
+block = uniform_block(n_qubits=2)
+print(block)
+
+
+ +
KronBlock(0,1)
+├── H(0)
+└── H(1)
+
+ +

+ +
+ Source code in qadence/states.py +
def uniform_block(n_qubits: int) -> KronBlock:
+    """
+    Generates the abstract uniform state for a specified number of qubits.
+
+    Arguments:
+        n_qubits (int): The number of qubits.
+
+    Returns:
+        A KronBlock representing the uniform state.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.states import uniform_block
+
+    block = uniform_block(n_qubits=2)
+    print(block)
+    ```
+    """
+    return _from_op(H, n_qubits=n_qubits)
+
+
+
+ +
+ +
+ + +

+ uniform_state(n_qubits, batch_size=1) + +

+ + +
+ +

Generates the uniform state for a specified number of qubits.

+ + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

The number of qubits.

+
+

+ + TYPE: + int + +

+
batch_size +
+

The batch size.

+
+

+ + TYPE: + int + + + DEFAULT: + 1 + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + Tensor + + +
+

A torch.Tensor.

+
+
+

Examples: +

from qadence.states import uniform_state
+
+state = uniform_state(n_qubits=2)
+print(state)
+
+
+ +
tensor([[0.5000+0.j, 0.5000+0.j, 0.5000+0.j, 0.5000+0.j]])
+
+ +

+ +
+ Source code in qadence/states.py +
def uniform_state(n_qubits: int, batch_size: int = 1) -> Tensor:
+    """
+    Generates the uniform state for a specified number of qubits.
+
+    Arguments:
+        n_qubits (int): The number of qubits.
+        batch_size (int): The batch size.
+
+    Returns:
+        A torch.Tensor.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.states import uniform_state
+
+    state = uniform_state(n_qubits=2)
+    print(state)
+    ```
+    """
+    norm = 1 / torch.sqrt(torch.tensor(2**n_qubits))
+    return norm * torch.ones(batch_size, 2**n_qubits, dtype=DTYPE)
+
+
+
+ +
+ +
+ + +

+ zero_block(n_qubits) + +

+ + +
+ +

Generates the abstract zero state for a specified number of qubits.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

The number of qubits.

+
+

+ + TYPE: + int + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + KronBlock + + +
+

A KronBlock representing the zero state.

+
+
+

Examples: +

from qadence.states import zero_block
+
+block = zero_block(n_qubits=2)
+print(block)
+
+
+ +
KronBlock(0,1)
+├── I(0)
+└── I(1)
+
+ +

+ +
+ Source code in qadence/states.py +
def zero_block(n_qubits: int) -> KronBlock:
+    """
+    Generates the abstract zero state for a specified number of qubits.
+
+    Arguments:
+        n_qubits (int): The number of qubits.
+
+    Returns:
+        A KronBlock representing the zero state.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.states import zero_block
+
+    block = zero_block(n_qubits=2)
+    print(block)
+    ```
+    """
+    return _from_op(I, n_qubits=n_qubits)
+
+
+
+ +
+ +
+ + +

+ zero_state(n_qubits, batch_size=1) + +

+ + +
+ +

Generates the zero state for a specified number of qubits.

+ + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
n_qubits +
+

The number of qubits for which the zero state is to be generated.

+
+

+ + TYPE: + int + +

+
batch_size +
+

The batch size for the zero state.

+
+

+ + TYPE: + int + + + DEFAULT: + 1 + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + Tensor + + +
+

A torch.Tensor.

+
+
+

Examples: +

from qadence.states import zero_state
+
+state = zero_state(n_qubits=2)
+print(state)
+
+
+ +
tensor([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]])
+
+ +

+ +
+ Source code in qadence/states.py +
def zero_state(n_qubits: int, batch_size: int = 1) -> Tensor:
+    """
+    Generates the zero state for a specified number of qubits.
+
+    Arguments:
+        n_qubits (int): The number of qubits for which the zero state is to be generated.
+        batch_size (int): The batch size for the zero state.
+
+    Returns:
+        A torch.Tensor.
+
+    Examples:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.states import zero_state
+
+    state = zero_state(n_qubits=2)
+    print(state)
+    ```
+    """
+    bitstring = "0" * n_qubits
+    return _state_from_bitstring(bitstring, batch_size)
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v1.7.4/api/transpile/index.html b/v1.7.4/api/transpile/index.html new file mode 100644 index 000000000..1032481b4 --- /dev/null +++ b/v1.7.4/api/transpile/index.html @@ -0,0 +1,3598 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Transpilation - Qadence + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Transpilation

+ +

Contains functions that operate on blocks and circuits to transpile them to new blocks/circuits.

+ + +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ transpile(*fs) + +

+ + +
+ +

AbstractBlock or QuantumCircuit transpilation.

+

Compose functions that +accept a circuit/block and return a circuit/block.

+ + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
*fs +
+

composable functions that either map blocks to blocks +(Callable[[AbstractBlock], AbstractBlock]) +or circuits to circuits (Callable[[QuantumCircuit], QuantumCircuit]).

+
+

+ + TYPE: + Callable + + + DEFAULT: + () + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + Callable + + +
+

Composed function.

+
+
+

Examples:

+

Flatten a block of nested chains and krons: +

from qadence import *
+from qadence.transpile import transpile, flatten, scale_primitive_blocks_only
+
+b = chain(2 * chain(chain(X(0), Y(0))), kron(kron(X(0), X(1))))
+print(b)
+
+# both flatten and scale_primitive_blocks_only are functions that accept and
+# return a block
+t = transpile(flatten, scale_primitive_blocks_only)(b)
+print(t)
+
+
+ +
ChainBlock(0,1)
+├── [mul: 2] 
+   └── ChainBlock(0)
+       └── ChainBlock(0)
+           ├── X(0)
+           └── Y(0)
+└── KronBlock(0,1)
+    └── KronBlock(0,1)
+        ├── X(0)
+        └── X(1)
+
+ChainBlock(0,1)
+├── [mul: 2.000] 
+   └── X(0)
+├── Y(0)
+└── KronBlock(0,1)
+    ├── X(0)
+    └── X(1)
+
+ +

+

We also provide a decorator to easily turn a function Callable[[AbstractBlock], AbstractBlock] +into a Callable[[QuantumCircuit], QuantumCircuit] to be used in circuit transpilation.

from qadence import *
+from qadence.transpile import transpile, blockfn_to_circfn, flatten
+
+# We want to pass this circuit to `transpile` instead of a block,
+# so we need functions that map from a circuit to a circuit.
+circ = QuantumCircuit(2, chain(chain(X(0), chain(X(1)))))
+
+@blockfn_to_circfn
+def fn(block):
+    # un-decorated function accepts a block and returns a block
+    return block * block
+
+transp = transpile(
+    # the decorated function accepts a circuit and returns a circuit
+    fn,
+    # already existing functions can also be decorated
+    blockfn_to_circfn(flatten)
+)
+print(transp(circ))
+
+
+ +
ChainBlock(0,1)
+├── ChainBlock(0,1)
+   ├── X(0)
+   └── X(1)
+└── ChainBlock(0,1)
+    ├── X(0)
+    └── X(1)
+
+ +

+ +
+ Source code in qadence/transpile/transpile.py +
26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
def transpile(*fs: Callable) -> Callable:
+    """`AbstractBlock` or `QuantumCircuit` transpilation.
+
+    Compose functions that
+    accept a circuit/block and return a circuit/block.
+
+    Arguments:
+        *fs: composable functions that either map blocks to blocks
+            (`Callable[[AbstractBlock], AbstractBlock]`)
+            or circuits to circuits (`Callable[[QuantumCircuit], QuantumCircuit]`).
+
+    Returns:
+        Composed function.
+
+    Examples:
+
+    Flatten a block of nested chains and krons:
+    ```python exec="on" source="material-block" result="json"
+    from qadence import *
+    from qadence.transpile import transpile, flatten, scale_primitive_blocks_only
+
+    b = chain(2 * chain(chain(X(0), Y(0))), kron(kron(X(0), X(1))))
+    print(b)
+    print() # markdown-exec: hide
+
+    # both flatten and scale_primitive_blocks_only are functions that accept and
+    # return a block
+    t = transpile(flatten, scale_primitive_blocks_only)(b)
+    print(t)
+    ```
+
+    We also provide a decorator to easily turn a function `Callable[[AbstractBlock], AbstractBlock]`
+    into a `Callable[[QuantumCircuit], QuantumCircuit]` to be used in circuit transpilation.
+    ```python exec="on" source="material-block" result="json"
+    from qadence import *
+    from qadence.transpile import transpile, blockfn_to_circfn, flatten
+
+    # We want to pass this circuit to `transpile` instead of a block,
+    # so we need functions that map from a circuit to a circuit.
+    circ = QuantumCircuit(2, chain(chain(X(0), chain(X(1)))))
+
+    @blockfn_to_circfn
+    def fn(block):
+        # un-decorated function accepts a block and returns a block
+        return block * block
+
+    transp = transpile(
+        # the decorated function accepts a circuit and returns a circuit
+        fn,
+        # already existing functions can also be decorated
+        blockfn_to_circfn(flatten)
+    )
+    print(transp(circ))
+    ```
+    """
+    return lambda x: reduce(lambda acc, f: f(acc), reversed(fs), x)
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ chain_single_qubit_ops(block) + +

+ + +
+ +

Transpile a chain of krons into a kron of chains of single qubit operations.

+

Examples: +

from qadence import hea
+from qadence.transpile.block import chain_single_qubit_ops
+
+# Consider a single HEA layer
+block = hea(2,1)
+print(block)
+
+# After applying chain_single_qubit_ops, we get:
+print(chain_single_qubit_ops(block))
+
+
ChainBlock(0,1) [tag: HEA]
+├── ChainBlock(0,1)
+   ├── KronBlock(0,1)
+      ├── RX(0) [params: ['theta_0']]
+      └── RX(1) [params: ['theta_1']]
+   ├── KronBlock(0,1)
+      ├── RY(0) [params: ['theta_2']]
+      └── RY(1) [params: ['theta_3']]
+   └── KronBlock(0,1)
+       ├── RX(0) [params: ['theta_4']]
+       └── RX(1) [params: ['theta_5']]
+└── ChainBlock(0,1)
+    └── KronBlock(0,1)
+        └── CNOT(0, 1)
+ChainBlock(0,1)
+├── KronBlock(0,1)
+   ├── ChainBlock(0)
+      ├── RX(0) [params: ['theta_0']]
+      ├── RY(0) [params: ['theta_2']]
+      └── RX(0) [params: ['theta_4']]
+   └── ChainBlock(1)
+       ├── RX(1) [params: ['theta_1']]
+       ├── RY(1) [params: ['theta_3']]
+       └── RX(1) [params: ['theta_5']]
+└── ChainBlock(0,1)
+    └── KronBlock(0,1)
+        └── CNOT(0, 1)
+

+ +
+ Source code in qadence/transpile/block.py +
def chain_single_qubit_ops(block: AbstractBlock) -> AbstractBlock:
+    """Transpile a chain of krons into a kron of chains of single qubit operations.
+
+    Examples:
+    ```python exec="on" source="above" result="json"
+    from qadence import hea
+    from qadence.transpile.block import chain_single_qubit_ops
+
+    # Consider a single HEA layer
+    block = hea(2,1)
+    print(block)
+
+    # After applying chain_single_qubit_ops, we get:
+    print(chain_single_qubit_ops(block))
+    ```
+    """
+    if is_chain_of_primitivekrons(block):
+        try:
+            return kron(*map(lambda bs: chain(*bs), zip(*block)))  # type: ignore[misc]
+        except Exception as e:
+            logger.debug(
+                f"Unable to transpile {block} using chain_single_qubit_ops\
+                         due to {e}. Returning original circuit."
+            )
+            return block
+
+    elif isinstance(block, CompositeBlock):
+        return _construct(type(block), tuple(chain_single_qubit_ops(b) for b in block.blocks))
+    else:
+        return block
+
+
+
+ +
+ +
+ + +

+ scale_primitive_blocks_only(block, scale=None) + +

+ + +
+ +

Push the scale all the way down into the leaves of the block tree.

+

When given a scaled CompositeBlock consisting of several PrimitiveBlocks.

+ + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
block +
+

The block to be transpiled.

+
+

+ + TYPE: + AbstractBlock + +

+
scale +
+

An optional scale parameter. Only to be used for recursive calls internally.

+
+

+ + TYPE: + Basic + + + DEFAULT: + None + +

+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ AbstractBlock + +
+

A block of the same type where the scales have been moved into the subblocks.

+
+

+ + TYPE: + AbstractBlock + +

+
+

Examples:

+

There are two different cases: +ChainBlocks/KronBlocks: Only the first subblock needs to be scaled because chains/krons +represent multiplications. +

from qadence import chain, X, RX
+from qadence.transpile import scale_primitive_blocks_only
+b = 2 * chain(X(0), RX(0, "theta"))
+print(b)
+# After applying scale_primitive_blocks_only
+print(scale_primitive_blocks_only(b))
+
+
[mul: 2] 
+└── ChainBlock(0)
+    ├── X(0)
+    └── RX(0) [params: ['theta']]
+ChainBlock(0)
+├── [mul: 2.000] 
+   └── X(0)
+└── RX(0) [params: ['theta']]
+

+

AddBlocks: Consider 2 * add(X(0), RX(0, "theta")). The scale needs to be added to all +subblocks. We get add(2 * X(0), 2 * RX(0, "theta")). +

from qadence import add, X, RX
+from qadence.transpile import scale_primitive_blocks_only
+b = 2 * add(X(0), RX(0, "theta"))
+print(b)
+# After applying scale_primitive_blocks_only
+print(scale_primitive_blocks_only(b))
+
+
[mul: 2] 
+└── AddBlock(0)
+    ├── X(0)
+    └── RX(0) [params: ['theta']]
+AddBlock(0)
+├── [mul: 2.000] 
+   └── X(0)
+└── [mul: 2.000] 
+    └── RX(0) [params: ['theta']]
+

+ +
+ Source code in qadence/transpile/block.py +
@singledispatch
+def scale_primitive_blocks_only(block: AbstractBlock, scale: sympy.Basic = None) -> AbstractBlock:
+    """Push the scale all the way down into the leaves of the block tree.
+
+    When given a scaled CompositeBlock consisting of several PrimitiveBlocks.
+
+    Arguments:
+        block: The block to be transpiled.
+        scale: An optional scale parameter. Only to be used for recursive calls internally.
+
+    Returns:
+        AbstractBlock: A block of the same type where the scales have been moved into the subblocks.
+
+    Examples:
+
+    There are two different cases:
+    `ChainBlock`s/`KronBlock`s: Only the first subblock needs to be scaled because chains/krons
+    represent multiplications.
+    ```python exec="on" source="above" result="json"
+    from qadence import chain, X, RX
+    from qadence.transpile import scale_primitive_blocks_only
+    b = 2 * chain(X(0), RX(0, "theta"))
+    print(b)
+    # After applying scale_primitive_blocks_only
+    print(scale_primitive_blocks_only(b))
+    ```
+
+    `AddBlock`s: Consider 2 * add(X(0), RX(0, "theta")).  The scale needs to be added to all
+    subblocks.  We get add(2 * X(0), 2 * RX(0, "theta")).
+    ```python exec="on" source="above" result="json"
+    from qadence import add, X, RX
+    from qadence.transpile import scale_primitive_blocks_only
+    b = 2 * add(X(0), RX(0, "theta"))
+    print(b)
+    # After applying scale_primitive_blocks_only
+    print(scale_primitive_blocks_only(b))
+    ```
+    """
+    raise NotImplementedError(f"scale_primitive_blocks_only is not implemented for {type(block)}")
+
+
+
+ +
+ +
+ + +

+ set_trainable(blocks, value=True, inplace=True) + +

+ + +
+ +

Set the trainability of all parameters in a block to a given value.

+ + + + + + + + + + + + + + + + + + + + + + + +
PARAMETERDESCRIPTION
blocks +
+

Block or list of blocks for which +to set the trainable attribute

+
+

+ + TYPE: + AbstractBlock | list[AbstractBlock] + +

+
value +
+

The value of the trainable attribute to assign to the input blocks

+
+

+ + TYPE: + bool + + + DEFAULT: + True + +

+
inplace +
+

Whether to modify the block(s) in place or not. Currently, only in-place modification is supported.

+
+

+ + TYPE: + bool + + + DEFAULT: + True + +

+
+ + + + + + + + + + + + + + + +
RAISESDESCRIPTION
+ + NotImplementedError + + +
+

if the inplace argument is set to False, the function will +raise this exception

+
+
+ + + + + + + + + + + + + + + +
RETURNSDESCRIPTION
+ + AbstractBlock | list[AbstractBlock] + + +
+

AbstractBlock | list[AbstractBlock]: the input block or list of blocks with the trainable +attribute set to the given value

+
+
+ +
+ Source code in qadence/transpile/block.py +
44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
def set_trainable(
+    blocks: AbstractBlock | list[AbstractBlock], value: bool = True, inplace: bool = True
+) -> AbstractBlock | list[AbstractBlock]:
+    """Set the trainability of all parameters in a block to a given value.
+
+    Args:
+        blocks (AbstractBlock | list[AbstractBlock]): Block or list of blocks for which
+            to set the trainable attribute
+        value (bool, optional): The value of the trainable attribute to assign to the input blocks
+        inplace (bool, optional): Whether to modify the block(s) in place or not. Currently, only
+
+    Raises:
+        NotImplementedError: if the `inplace` argument is set to False, the function will
+            raise  this exception
+
+    Returns:
+        AbstractBlock | list[AbstractBlock]: the input block or list of blocks with the trainable
+            attribute set to the given value
+    """
+
+    if isinstance(blocks, AbstractBlock):
+        blocks = [blocks]
+
+    if inplace:
+        for block in blocks:
+            params: list[sympy.Basic] = parameters(block)
+            for p in params:
+                if not p.is_number:
+                    p.trainable = value
+    else:
+        raise NotImplementedError("Not inplace set_trainable is not yet available")
+
+    return blocks if len(blocks) > 1 else blocks[0]
+
+
+
+ +
+ +
+ + +

+ validate(block) + +

+ + +
+ +

Moves a block from global to local qubit numbers by adding PutBlocks.

+

Reassigns qubit locations appropriately.

+

Example

+
from qadence.blocks import chain
+from qadence.operations import X
+from qadence.transpile import validate
+
+x = chain(chain(X(0)), chain(X(1)))
+print(x)
+print(validate(x))
+
+
ChainBlock(0,1)
+├── ChainBlock(0)
+   └── X(0)
+└── ChainBlock(1)
+    └── X(1)
+ChainBlock(0,1)
+├── put on (0)
+   └── ChainBlock(0)
+       └── put on (0)
+           └── X(0)
+└── put on (1)
+    └── ChainBlock(0)
+        └── put on (0)
+            └── X(0)
+
+ +
+ Source code in qadence/transpile/block.py +
def validate(block: AbstractBlock) -> AbstractBlock:
+    """Moves a block from global to local qubit numbers by adding PutBlocks.
+
+    Reassigns qubit locations appropriately.
+
+    # Example
+    ```python exec="on" source="above" result="json"
+    from qadence.blocks import chain
+    from qadence.operations import X
+    from qadence.transpile import validate
+
+    x = chain(chain(X(0)), chain(X(1)))
+    print(x)
+    print(validate(x))
+    ```
+    """
+    vblock: AbstractBlock
+    from qadence.transpile import reassign
+
+    if isinstance(block, ControlBlock):
+        vblock = deepcopy(block)
+        b: AbstractBlock
+        (b,) = block.blocks
+        b = reassign(b, {i: i - min(b.qubit_support) for i in b.qubit_support})
+        b = validate(b)
+        vblock.blocks = (b,)  # type: ignore[assignment]
+
+    elif isinstance(block, CompositeBlock):
+        blocks = []
+        for b in block.blocks:
+            mi, ma = min(b.qubit_support), max(b.qubit_support)
+            nb = reassign(b, {i: i - min(b.qubit_support) for i in b.qubit_support})
+            nb = validate(nb)
+            nb = PutBlock(nb, tuple(range(mi, ma + 1)))
+            blocks.append(nb)
+        try:
+            vblock = _construct(type(block), tuple(blocks))
+        except AssertionError as e:
+            if str(e) == "Make sure blocks act on distinct qubits!":
+                vblock = chain(*blocks)
+            else:
+                raise e
+
+    elif isinstance(block, PrimitiveBlock):
+        vblock = deepcopy(block)
+
+    else:
+        raise NotImplementedError
+
+    vblock.tag = block.tag
+    return vblock
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v1.7.4/api/types/index.html b/v1.7.4/api/types/index.html new file mode 100644 index 000000000..1e554cb0c --- /dev/null +++ b/v1.7.4/api/types/index.html @@ -0,0 +1,8724 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Types - Qadence + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Types

+ +

Qadence Types

+ + +
+ + + + +
+ + + +
+ + + + + + + +
+ + + +

+ TArray = Union[Iterable, Tensor, np.ndarray] + + + module-attribute + + +

+ + +
+ +

Union of common array types.

+
+ +
+ +
+ + + +

+ TGenerator = Union[Tensor, sympy.Array, sympy.Basic] + + + module-attribute + + +

+ + +
+ +

Union of torch tensors and numpy arrays.

+
+ +
+ +
+ + + +

+ TNumber = Union[int, float, complex, np.int64, np.float64] + + + module-attribute + + +

+ + +
+ +

Union of python and numpy numeric types.

+
+ +
+ +
+ + + +

+ TParameter = Union[TNumber, Tensor, sympy.Basic, str] + + + module-attribute + + +

+ + +
+ +

Union of numbers, tensors, and parameter types.

+
+ +
+ + +
+ + + +

+ AlgoHEvo + + +

+ + +
+

+ Bases: StrEnum

+ + +

Hamiltonian Evolution algorithms that can be used by the backend.

+ + + + +
+ + + + + + + +
+ + + +

+ EIG = 'EIG' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Using Hamiltonian diagonalization.

+
+ +
+ +
+ + + +

+ EXP = 'EXP' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Using torch.matrix_exp on the generator matrix.

+
+ +
+ +
+ + + +

+ RK4 = 'RK4' + + + class-attribute + instance-attribute + + +

+ + +
+ +

4th order Runge-Kutta approximation.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ AnsatzType + + +

+ + +
+

+ Bases: StrEnum

+ + +

Ansatz types for variational circuits.

+ + + + +
+ + + + + + + +
+ + + +

+ HEA = 'hea' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Hardware-efficient ansatz.

+
+ +
+ +
+ + + +

+ IIA = 'iia' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Identity-Initialised Ansatz.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ BasisSet + + +

+ + +
+

+ Bases: StrEnum

+ + +

Basis set for feature maps.

+ + + + +
+ + + + + + + +
+ + + +

+ CHEBYSHEV = 'Chebyshev' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Chebyshev polynomials of the first kind.

+
+ +
+ +
+ + + +

+ FOURIER = 'Fourier' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Fourier basis set.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ DeviceType + + +

+ + +
+

+ Bases: StrEnum

+ + +

Supported types of devices for Pulser backend.

+ + + + +
+ + + + + + + +
+ + + +

+ IDEALIZED = 'IdealDevice' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Idealized device, least realistic.

+
+ +
+ +
+ + + +

+ REALISTIC = 'RealisticDevice' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Device with realistic specs.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ Endianness + + +

+ + +
+

+ Bases: StrEnum

+ + +

The endianness convention to use.

+ + + + +
+ + + + + + + +
+ + + +

+ BIG = 'Big' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Use Big endianness.

+
+ +
+ +
+ + + +

+ LITTLE = 'Little' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Use little endianness.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ ExperimentTrackingTool + + +

+ + +
+

+ Bases: StrEnum

+ + + + + +
+ + + + + + + +
+ + + +

+ MLFLOW = 'mlflow' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Use the ml-flow experiment tracker.

+
+ +
+ +
+ + + +

+ TENSORBOARD = 'tensorboard' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Use the tensorboard experiment tracker.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ FigFormat + + +

+ + +
+

+ Bases: StrEnum

+ + +

Available output formats for exporting visualized circuits to a file.

+ + + + +
+ + + + + + + +
+ + + +

+ PDF = 'PDF' + + + class-attribute + instance-attribute + + +

+ + +
+ +

PDF format.

+
+ +
+ +
+ + + +

+ PNG = 'PNG' + + + class-attribute + instance-attribute + + +

+ + +
+ +

PNG format.

+
+ +
+ +
+ + + +

+ SVG = 'SVG' + + + class-attribute + instance-attribute + + +

+ + +
+ +

SVG format.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ GenDAQC + + +

+ + +
+

+ Bases: StrEnum

+ + +

The type of interaction for the DAQC transform.

+ + + + +
+ + + + + + + +
+ + + +

+ NN = 'NN' + + + class-attribute + instance-attribute + + +

+ + +
+ +

NN

+
+ +
+ +
+ + + +

+ ZZ = 'ZZ' + + + class-attribute + instance-attribute + + +

+ + +
+ +

ZZ

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ InputDiffMode + + +

+ + +
+

+ Bases: StrEnum

+ + +

Derivative modes w.r.t inputs of UFAs.

+ + + + +
+ + + + + + + +
+ + + +

+ AD = 'ad' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Reverse automatic differentiation.

+
+ +
+ +
+ + + +

+ FD = 'fd' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Central finite differencing.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ Interaction + + +

+ + +
+

+ Bases: StrEnum

+ + +

Interaction types used in.

+ + + + + +
+ + + + + + + +
+ + + +

+ NN = 'NN' + + + class-attribute + instance-attribute + + +

+ + +
+ +

NN-Ising Interaction, N=(I-Z)/2.

+
+ +
+ +
+ + + +

+ XY = 'XY' + + + class-attribute + instance-attribute + + +

+ + +
+ +

XY Interaction.

+
+ +
+ +
+ + + +

+ XYZ = 'XYZ' + + + class-attribute + instance-attribute + + +

+ + +
+ +

XYZ Interaction.

+
+ +
+ +
+ + + +

+ ZZ = 'ZZ' + + + class-attribute + instance-attribute + + +

+ + +
+ +

ZZ-Ising Interaction.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ LTSOrder + + +

+ + +
+

+ Bases: StrEnum

+ + +

Lie-Trotter-Suzuki approximation order.

+ + + + +
+ + + + + + + +
+ + + +

+ BASIC = 'BASIC' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Basic.

+
+ +
+ +
+ + + +

+ ST2 = 'ST2' + + + class-attribute + instance-attribute + + +

+ + +
+ +

ST2.

+
+ +
+ +
+ + + +

+ ST4 = 'ST4' + + + class-attribute + instance-attribute + + +

+ + +
+ +

ST4.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ LatticeTopology + + +

+ + +
+

+ Bases: StrEnum

+ + +

Lattice topologies to choose from for the register.

+ + + + +
+ + + + + + + +
+ + + +

+ ALL_TO_ALL = 'all_to_all' + + + class-attribute + instance-attribute + + +

+ + +
+ +

All to all- connected lattice.

+
+ +
+ +
+ + + +

+ ARBITRARY = 'arbitrary' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Arbitrarily-shaped lattice.

+
+ +
+ +
+ + + +

+ CIRCLE = 'circle' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Circular lattice.

+
+ +
+ +
+ + + +

+ HONEYCOMB_LATTICE = 'honeycomb_lattice' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Honeycomb-shaped lattice.

+
+ +
+ +
+ + + +

+ LINE = 'line' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Line-format lattice.

+
+ +
+ +
+ + + +

+ RECTANGULAR_LATTICE = 'rectangular_lattice' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Rectangular-shaped lattice.

+
+ +
+ +
+ + + +

+ SQUARE = 'square' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Square lattice.

+
+ +
+ +
+ + + +

+ TRIANGULAR_LATTICE = 'triangular_lattice' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Triangular-shaped shape.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ MultivariateStrategy + + +

+ + +
+

+ Bases: StrEnum

+ + +

Multivariate strategy for feature maps.

+ + + + +
+ + + + + + + +
+ + + +

+ PARALLEL = 'parallel' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Parallel strategy.

+
+ +
+ +
+ + + +

+ SERIES = 'SERIES' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Serial strategy.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ ObservableTransform + + +

+ + +
+ + +

Observable transformation type.

+ + + + +
+ + + + + + + +
+ + + +

+ NONE = 'none' + + + class-attribute + instance-attribute + + +

+ + +
+ +

No transformation.

+
+ +
+ +
+ + + +

+ RANGE = 'range' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Use the given values as min and max.

+
+ +
+ +
+ + + +

+ SCALE = 'scale' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Use the given values as scale and shift.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ OpName + + +

+ + +
+

+ Bases: StrEnum

+ + +

A list of all available of digital-analog operations.

+ + + + +
+ + + + + + + +
+ + + +

+ ANALOGENTANG = 'AnalogEntanglement' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The analog entanglement operation.

+
+ +
+ +
+ + + +

+ ANALOGINTERACTION = 'AnalogInteraction' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The analog interaction operation.

+
+ +
+ +
+ + + +

+ ANALOGRX = 'AnalogRX' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The analog RX operation.

+
+ +
+ +
+ + + +

+ ANALOGRY = 'AnalogRY' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The analog RY operation.

+
+ +
+ +
+ + + +

+ ANALOGRZ = 'AnalogRZ' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The analog RZ operation.

+
+ +
+ +
+ + + +

+ ANALOGSWAP = 'AnalogSWAP' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The analog SWAP operation.

+
+ +
+ +
+ + + +

+ CNOT = 'CNOT' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The CNOT gate.

+
+ +
+ +
+ + + +

+ CPHASE = 'CPHASE' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The controlled PHASE gate.

+
+ +
+ +
+ + + +

+ CRX = 'CRX' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The Control RX gate.

+
+ +
+ +
+ + + +

+ CRY = 'CRY' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The Controlled RY gate.

+
+ +
+ +
+ + + +

+ CRZ = 'CRZ' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The Control RZ gate.

+
+ +
+ +
+ + + +

+ CSWAP = 'CSWAP' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The Control SWAP gate.

+
+ +
+ +
+ + + +

+ CZ = 'CZ' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The CZ gate.

+
+ +
+ +
+ + + +

+ ENTANGLE = 'entangle' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The entanglement operation.

+
+ +
+ +
+ + + +

+ H = 'H' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The Hadamard gate.

+
+ +
+ +
+ + + +

+ HAMEVO = 'HamEvo' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The Hamiltonian Evolution operation.

+
+ +
+ +
+ + + +

+ I = 'I' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The Identity gate.

+
+ +
+ +
+ + + +

+ MCPHASE = 'MCPHASE' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The Multicontrol PHASE gate.

+
+ +
+ +
+ + + +

+ MCRX = 'MCRX' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The Multicontrol RX gate.

+
+ +
+ +
+ + + +

+ MCRY = 'MCRY' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The Multicontrol RY gate.

+
+ +
+ +
+ + + +

+ MCRZ = 'MCRZ' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The Multicontrol RZ gate.

+
+ +
+ +
+ + + +

+ MCZ = 'MCZ' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The Multicontrol CZ gate.

+
+ +
+ +
+ + + +

+ N = 'N' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The N = (1/2)(I-Z) operator.

+
+ +
+ +
+ + + +

+ PHASE = 'PHASE' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The PHASE gate.

+
+ +
+ +
+ + + +

+ PROJ = 'Projector' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The projector operation.

+
+ +
+ +
+ + + +

+ RX = 'RX' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The RX gate.

+
+ +
+ +
+ + + +

+ RY = 'RY' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The RY gate.

+
+ +
+ +
+ + + +

+ RZ = 'RZ' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The RZ gate.

+
+ +
+ +
+ + + +

+ S = 'S' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The S gate.

+
+ +
+ +
+ + + +

+ SDAGGER = 'SDagger' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The S dagger gate.

+
+ +
+ +
+ + + +

+ SWAP = 'SWAP' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The SWAP gate.

+
+ +
+ +
+ + + +

+ T = 'T' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The T gate.

+
+ +
+ +
+ + + +

+ TDAGGER = 'TDagger' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The T dagger gate.

+
+ +
+ +
+ + + +

+ TOFFOLI = 'Toffoli' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The Toffoli gate.

+
+ +
+ +
+ + + +

+ U = 'U' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The U gate.

+
+ +
+ +
+ + + +

+ X = 'X' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The X gate.

+
+ +
+ +
+ + + +

+ Y = 'Y' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The Y gate.

+
+ +
+ +
+ + + +

+ Z = 'Z' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The Z gate.

+
+ +
+ +
+ + + +

+ ZERO = 'Zero' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The zero gate.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ OverlapMethod + + +

+ + +
+

+ Bases: StrEnum

+ + +

Overlap Methods to choose from.

+ + + + +
+ + + + + + + +
+ + + +

+ COMPUTE_UNCOMPUTE = 'compute_uncompute' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Compute-uncompute.

+
+ +
+ +
+ + + +

+ EXACT = 'exact' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Exact.

+
+ +
+ +
+ + + +

+ HADAMARD_TEST = 'hadamard_test' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Hadamard-test.

+
+ +
+ +
+ + + +

+ JENSEN_SHANNON = 'jensen_shannon' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Jensen-shannon.

+
+ +
+ +
+ + + +

+ SWAP_TEST = 'swap_test' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Swap-test.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ ParameterType + + +

+ + +
+

+ Bases: StrEnum

+ + +

Parameter types available in qadence.

+ + + + +
+ + + + + + + +
+ + + +

+ FEATURE = 'Feature' + + + class-attribute + instance-attribute + + +

+ + +
+ +

FeatureParameters act as input and are not trainable.

+
+ +
+ +
+ + + +

+ FIXED = 'Fixed' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Fixed/ constant parameters are neither trainable nor act as input.

+
+ +
+ +
+ + + +

+ VARIATIONAL = 'Variational' + + + class-attribute + instance-attribute + + +

+ + +
+ +

VariationalParameters are trainable.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ QubitSupportType + + +

+ + +
+

+ Bases: StrEnum

+ + +

Qubit support types.

+ + + + +
+ + + + + + + +
+ + + +

+ GLOBAL = 'global' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Use global qubit support.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ ResultType + + +

+ + +
+

+ Bases: StrEnum

+ + +

Available data types for generating certain results.

+ + + + +
+ + + + + + + +
+ + + +

+ NUMPY = 'Numpy' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Numpy Array Type.

+
+ +
+ +
+ + + +

+ STRING = 'String' + + + class-attribute + instance-attribute + + +

+ + +
+ +

String Type.

+
+ +
+ +
+ + + +

+ TORCH = 'Torch' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Torch Tensor Type.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ ReuploadScaling + + +

+ + +
+

+ Bases: StrEnum

+ + +

Scaling for data reuploads in feature maps.

+ + + + +
+ + + + + + + +
+ + + +

+ CONSTANT = 'Constant' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Constant scaling.

+
+ +
+ +
+ + + +

+ EXP = 'Exponential' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Exponentially increasing scaling.

+
+ +
+ +
+ + + +

+ TOWER = 'Tower' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Linearly increasing scaling.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ SerializationFormat + + +

+ + +
+

+ Bases: StrEnum

+ + +

Available serialization formats for circuits.

+ + + + +
+ + + + + + + +
+ + + +

+ JSON = 'JSON' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The Json format.

+
+ +
+ +
+ + + +

+ PT = 'PT' + + + class-attribute + instance-attribute + + +

+ + +
+ +

The PT format used by Torch.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ StateGeneratorType + + +

+ + +
+

+ Bases: StrEnum

+ + +

Methods to generate random states.

+ + + + +
+ + + + + + + +
+ + + +

+ HAAR_MEASURE_FAST = 'HaarMeasureFast' + + + class-attribute + instance-attribute + + +

+ + +
+ +

HaarMeasure.

+
+ +
+ +
+ + + +

+ HAAR_MEASURE_SLOW = 'HaarMeasureSlow' + + + class-attribute + instance-attribute + + +

+ + +
+ +

HaarMeasure non-optimized version.

+
+ +
+ +
+ + + +

+ RANDOM_ROTATIONS = 'RandomRotations' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Random Rotations.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ StrEnum + + +

+ + +
+

+ Bases: str, Enum

+ + + + + +
+ + + + + + + + + +
+ + +

+ __str__() + +

+ + +
+ +

Used when dumping enum fields in a schema.

+ +
+ Source code in qadence/types.py +
58
+59
+60
+61
def __str__(self) -> str:
+    """Used when dumping enum fields in a schema."""
+    ret: str = self.value
+    return ret
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ Strategy + + +

+ + +
+

+ Bases: StrEnum

+ + +

Computing paradigm.

+ + + + +
+ + + + + + + +
+ + + +

+ ANALOG = 'Analog' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Use the analog paradigm.

+
+ +
+ +
+ + + +

+ BDAQC = 'bDAQC' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Use the banged digital-analog QC paradigm.

+
+ +
+ +
+ + + +

+ DIGITAL = 'Digital' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Use the digital paradigm.

+
+ +
+ +
+ + + +

+ RYDBERG = 'Rydberg' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Use the Rydberg QC paradigm.

+
+ +
+ +
+ + + +

+ SDAQC = 'sDAQC' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Use the step-wise digital-analog QC paradigm.

+
+ +
+ + + + + +
+ +
+ +
+ +
+ + + +

+ TensorType + + +

+ + +
+

+ Bases: StrEnum

+ + +

Tensor Types for converting blocks to tensors.

+ + + + +
+ + + + + + + +
+ + + +

+ DENSE = 'Dense' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Convert a block to a dense tensor.

+
+ +
+ +
+ + + +

+ SPARSE = 'Sparse' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Convert a observable block to a sparse tensor.

+
+ +
+ +
+ + + +

+ SPARSEDIAGONAL = 'SparseDiagonal' + + + class-attribute + instance-attribute + + +

+ + +
+ +

Convert a diagonal observable block to a sparse diagonal if possible.

+
+ +
+ + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v1.7.4/assets/_markdown_exec_pyodide.css b/v1.7.4/assets/_markdown_exec_pyodide.css new file mode 100644 index 000000000..71f9f2852 --- /dev/null +++ b/v1.7.4/assets/_markdown_exec_pyodide.css @@ -0,0 +1,50 @@ +html[data-theme="light"] { + @import "https://cdn.jsdelivr.net/npm/highlightjs-themes@1.0.0/tomorrow.css" +} + +html[data-theme="dark"] { + @import "https://cdn.jsdelivr.net/npm/highlightjs-themes@1.0.0/tomorrow-night-blue.min.css" +} + + +.ace_gutter { + z-index: 1; +} + +.pyodide-editor { + width: 100%; + min-height: 200px; + max-height: 400px; + font-size: .85em; +} + +.pyodide-editor-bar { + color: var(--md-primary-bg-color); + background-color: var(--md-primary-fg-color); + width: 100%; + font: monospace; + font-size: 0.75em; + padding: 2px 0 2px; +} + +.pyodide-bar-item { + padding: 0 18px 0; + display: inline-block; + width: 50%; +} + +.pyodide pre { + margin: 0; +} + +.pyodide-output { + width: 100%; + margin-bottom: -15px; + min-height: 46px; + max-height: 400px +} + +.pyodide-clickable { + cursor: pointer; + text-align: right; +} \ No newline at end of file diff --git a/v1.7.4/assets/_markdown_exec_pyodide.js b/v1.7.4/assets/_markdown_exec_pyodide.js new file mode 100644 index 000000000..1f6ae91b6 --- /dev/null +++ b/v1.7.4/assets/_markdown_exec_pyodide.js @@ -0,0 +1,109 @@ +var _sessions = {}; + +function getSession(name, pyodide) { + if (!(name in _sessions)) { + _sessions[name] = pyodide.globals.get("dict")(); + } + return _sessions[name]; +} + +function writeOutput(element, string) { + element.innerHTML += string + '\n'; +} + +function clearOutput(element) { + element.innerHTML = ''; +} + +async function evaluatePython(pyodide, editor, output, session) { + pyodide.setStdout({ batched: (string) => { writeOutput(output, string); } }); + let result, code = editor.getValue(); + clearOutput(output); + try { + result = await pyodide.runPythonAsync(code, { globals: 
getSession(session, pyodide) }); + } catch (error) { + writeOutput(output, error); + } + if (result) writeOutput(output, result); + hljs.highlightElement(output); +} + +async function initPyodide() { + try { + let pyodide = await loadPyodide(); + await pyodide.loadPackage("micropip"); + return pyodide; + } catch(error) { + return null; + } +} + +function getTheme() { + return document.body.getAttribute('data-md-color-scheme'); +} + +function setTheme(editor, currentTheme, light, dark) { + // https://gist.github.com/RyanNutt/cb8d60997d97905f0b2aea6c3b5c8ee0 + if (currentTheme === "default") { + editor.setTheme("ace/theme/" + light); + document.querySelector(`link[title="light"]`).removeAttribute("disabled"); + document.querySelector(`link[title="dark"]`).setAttribute("disabled", "disabled"); + } else if (currentTheme === "slate") { + editor.setTheme("ace/theme/" + dark); + document.querySelector(`link[title="dark"]`).removeAttribute("disabled"); + document.querySelector(`link[title="light"]`).setAttribute("disabled", "disabled"); + } +} + +function updateTheme(editor, light, dark) { + // Create a new MutationObserver instance + const observer = new MutationObserver((mutations) => { + // Loop through the mutations that occurred + mutations.forEach((mutation) => { + // Check if the mutation was a change to the data-md-color-scheme attribute + if (mutation.attributeName === 'data-md-color-scheme') { + // Get the new value of the attribute + const newColorScheme = mutation.target.getAttribute('data-md-color-scheme'); + // Update the editor theme + setTheme(editor, newColorScheme, light, dark); + } + }); + }); + + // Configure the observer to watch for changes to the data-md-color-scheme attribute + observer.observe(document.body, { + attributes: true, + attributeFilter: ['data-md-color-scheme'], + }); +} + +async function setupPyodide(idPrefix, install = null, themeLight = 'tomorrow', themeDark = 'tomorrow_night', session = null) { + const editor = ace.edit(idPrefix + 
"editor"); + const run = document.getElementById(idPrefix + "run"); + const clear = document.getElementById(idPrefix + "clear"); + const output = document.getElementById(idPrefix + "output"); + + updateTheme(editor, themeLight, themeDark); + + editor.session.setMode("ace/mode/python"); + setTheme(editor, getTheme(), themeLight, themeDark); + + writeOutput(output, "Initializing..."); + let pyodide = await pyodidePromise; + if (install && install.length) { + micropip = pyodide.pyimport("micropip"); + for (const package of install) + await micropip.install(package); + } + clearOutput(output); + run.onclick = () => evaluatePython(pyodide, editor, output, session); + clear.onclick = () => clearOutput(output); + output.parentElement.parentElement.addEventListener("keydown", (event) => { + if (event.ctrlKey && event.key.toLowerCase() === 'enter') { + event.preventDefault(); + run.click(); + } + }); +} + +var pyodidePromise = initPyodide(); diff --git a/v1.7.4/assets/_mkdocstrings.css b/v1.7.4/assets/_mkdocstrings.css new file mode 100644 index 000000000..85449ec79 --- /dev/null +++ b/v1.7.4/assets/_mkdocstrings.css @@ -0,0 +1,119 @@ + +/* Avoid breaking parameter names, etc. in table cells. */ +.doc-contents td code { + word-break: normal !important; +} + +/* No line break before first paragraph of descriptions. */ +.doc-md-description, +.doc-md-description>p:first-child { + display: inline; +} + +/* Max width for docstring sections tables. */ +.doc .md-typeset__table, +.doc .md-typeset__table table { + display: table !important; + width: 100%; +} + +.doc .md-typeset__table tr { + display: table-row; +} + +/* Defaults in Spacy table style. */ +.doc-param-default { + float: right; +} + +/* Backward-compatibility: docstring section titles in bold. */ +.doc-section-title { + font-weight: bold; +} + +/* Symbols in Navigation and ToC. 
*/ +:root, +[data-md-color-scheme="default"] { + --doc-symbol-attribute-fg-color: #953800; + --doc-symbol-function-fg-color: #8250df; + --doc-symbol-method-fg-color: #8250df; + --doc-symbol-class-fg-color: #0550ae; + --doc-symbol-module-fg-color: #5cad0f; + + --doc-symbol-attribute-bg-color: #9538001a; + --doc-symbol-function-bg-color: #8250df1a; + --doc-symbol-method-bg-color: #8250df1a; + --doc-symbol-class-bg-color: #0550ae1a; + --doc-symbol-module-bg-color: #5cad0f1a; +} + +[data-md-color-scheme="slate"] { + --doc-symbol-attribute-fg-color: #ffa657; + --doc-symbol-function-fg-color: #d2a8ff; + --doc-symbol-method-fg-color: #d2a8ff; + --doc-symbol-class-fg-color: #79c0ff; + --doc-symbol-module-fg-color: #baff79; + + --doc-symbol-attribute-bg-color: #ffa6571a; + --doc-symbol-function-bg-color: #d2a8ff1a; + --doc-symbol-method-bg-color: #d2a8ff1a; + --doc-symbol-class-bg-color: #79c0ff1a; + --doc-symbol-module-bg-color: #baff791a; +} + +code.doc-symbol { + border-radius: .1rem; + font-size: .85em; + padding: 0 .3em; + font-weight: bold; +} + +code.doc-symbol-attribute { + color: var(--doc-symbol-attribute-fg-color); + background-color: var(--doc-symbol-attribute-bg-color); +} + +code.doc-symbol-attribute::after { + content: "attr"; +} + +code.doc-symbol-function { + color: var(--doc-symbol-function-fg-color); + background-color: var(--doc-symbol-function-bg-color); +} + +code.doc-symbol-function::after { + content: "func"; +} + +code.doc-symbol-method { + color: var(--doc-symbol-method-fg-color); + background-color: var(--doc-symbol-method-bg-color); +} + +code.doc-symbol-method::after { + content: "meth"; +} + +code.doc-symbol-class { + color: var(--doc-symbol-class-fg-color); + background-color: var(--doc-symbol-class-bg-color); +} + +code.doc-symbol-class::after { + content: "class"; +} + +code.doc-symbol-module { + color: var(--doc-symbol-module-fg-color); + background-color: var(--doc-symbol-module-bg-color); +} + +code.doc-symbol-module::after { + content: 
"mod"; +} + +.doc-signature .autorefs { + color: inherit; + border-bottom: 1px dotted currentcolor; +} diff --git a/v1.7.4/assets/images/favicon.png b/v1.7.4/assets/images/favicon.png new file mode 100644 index 000000000..1cf13b9f9 Binary files /dev/null and b/v1.7.4/assets/images/favicon.png differ diff --git a/v1.7.4/assets/javascripts/bundle.fe8b6f2b.min.js b/v1.7.4/assets/javascripts/bundle.fe8b6f2b.min.js new file mode 100644 index 000000000..cf778d428 --- /dev/null +++ b/v1.7.4/assets/javascripts/bundle.fe8b6f2b.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var Fi=Object.create;var gr=Object.defineProperty;var ji=Object.getOwnPropertyDescriptor;var Wi=Object.getOwnPropertyNames,Dt=Object.getOwnPropertySymbols,Ui=Object.getPrototypeOf,xr=Object.prototype.hasOwnProperty,no=Object.prototype.propertyIsEnumerable;var oo=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,R=(e,t)=>{for(var r in t||(t={}))xr.call(t,r)&&oo(e,r,t[r]);if(Dt)for(var r of Dt(t))no.call(t,r)&&oo(e,r,t[r]);return e};var io=(e,t)=>{var r={};for(var o in e)xr.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&Dt)for(var o of Dt(e))t.indexOf(o)<0&&no.call(e,o)&&(r[o]=e[o]);return r};var yr=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Di=(e,t,r,o)=>{if(t&&typeof t=="object"||typeof t=="function")for(let n of Wi(t))!xr.call(e,n)&&n!==r&&gr(e,n,{get:()=>t[n],enumerable:!(o=ji(t,n))||o.enumerable});return e};var Vt=(e,t,r)=>(r=e!=null?Fi(Ui(e)):{},Di(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var ao=(e,t,r)=>new Promise((o,n)=>{var i=p=>{try{s(r.next(p))}catch(c){n(c)}},a=p=>{try{s(r.throw(p))}catch(c){n(c)}},s=p=>p.done?o(p.value):Promise.resolve(p.value).then(i,a);s((r=r.apply(e,t)).next())});var co=yr((Er,so)=>{(function(e,t){typeof Er=="object"&&typeof so!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(Er,function(){"use strict";function e(r){var 
o=!0,n=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(H){return!!(H&&H!==document&&H.nodeName!=="HTML"&&H.nodeName!=="BODY"&&"classList"in H&&"contains"in H.classList)}function p(H){var mt=H.type,ze=H.tagName;return!!(ze==="INPUT"&&a[mt]&&!H.readOnly||ze==="TEXTAREA"&&!H.readOnly||H.isContentEditable)}function c(H){H.classList.contains("focus-visible")||(H.classList.add("focus-visible"),H.setAttribute("data-focus-visible-added",""))}function l(H){H.hasAttribute("data-focus-visible-added")&&(H.classList.remove("focus-visible"),H.removeAttribute("data-focus-visible-added"))}function f(H){H.metaKey||H.altKey||H.ctrlKey||(s(r.activeElement)&&c(r.activeElement),o=!0)}function u(H){o=!1}function h(H){s(H.target)&&(o||p(H.target))&&c(H.target)}function w(H){s(H.target)&&(H.target.classList.contains("focus-visible")||H.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),l(H.target))}function A(H){document.visibilityState==="hidden"&&(n&&(o=!0),te())}function te(){document.addEventListener("mousemove",J),document.addEventListener("mousedown",J),document.addEventListener("mouseup",J),document.addEventListener("pointermove",J),document.addEventListener("pointerdown",J),document.addEventListener("pointerup",J),document.addEventListener("touchmove",J),document.addEventListener("touchstart",J),document.addEventListener("touchend",J)}function ie(){document.removeEventListener("mousemove",J),document.removeEventListener("mousedown",J),document.removeEventListener("mouseup",J),document.removeEventListener("pointermove",J),document.removeEventListener("pointerdown",J),document.removeEventListener("pointerup",J),document.removeEventListener("touchmove",J),document.removeEventListener("touchstart",J),document.removeEventListener("touchend",J)}function 
J(H){H.target.nodeName&&H.target.nodeName.toLowerCase()==="html"||(o=!1,ie())}document.addEventListener("keydown",f,!0),document.addEventListener("mousedown",u,!0),document.addEventListener("pointerdown",u,!0),document.addEventListener("touchstart",u,!0),document.addEventListener("visibilitychange",A,!0),te(),r.addEventListener("focus",h,!0),r.addEventListener("blur",w,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var Yr=yr((Rt,Kr)=>{/*! + * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof Rt=="object"&&typeof Kr=="object"?Kr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Rt=="object"?Rt.ClipboardJS=r():t.ClipboardJS=r()})(Rt,function(){return function(){var e={686:function(o,n,i){"use strict";i.d(n,{default:function(){return Ii}});var a=i(279),s=i.n(a),p=i(370),c=i.n(p),l=i(817),f=i.n(l);function u(V){try{return document.execCommand(V)}catch(_){return!1}}var h=function(_){var M=f()(_);return u("cut"),M},w=h;function A(V){var _=document.documentElement.getAttribute("dir")==="rtl",M=document.createElement("textarea");M.style.fontSize="12pt",M.style.border="0",M.style.padding="0",M.style.margin="0",M.style.position="absolute",M.style[_?"right":"left"]="-9999px";var j=window.pageYOffset||document.documentElement.scrollTop;return M.style.top="".concat(j,"px"),M.setAttribute("readonly",""),M.value=V,M}var te=function(_,M){var 
j=A(_);M.container.appendChild(j);var D=f()(j);return u("copy"),j.remove(),D},ie=function(_){var M=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},j="";return typeof _=="string"?j=te(_,M):_ instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(_==null?void 0:_.type)?j=te(_.value,M):(j=f()(_),u("copy")),j},J=ie;function H(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?H=function(M){return typeof M}:H=function(M){return M&&typeof Symbol=="function"&&M.constructor===Symbol&&M!==Symbol.prototype?"symbol":typeof M},H(V)}var mt=function(){var _=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},M=_.action,j=M===void 0?"copy":M,D=_.container,Y=_.target,ke=_.text;if(j!=="copy"&&j!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(Y!==void 0)if(Y&&H(Y)==="object"&&Y.nodeType===1){if(j==="copy"&&Y.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if(j==="cut"&&(Y.hasAttribute("readonly")||Y.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. 
You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(ke)return J(ke,{container:D});if(Y)return j==="cut"?w(Y):J(Y,{container:D})},ze=mt;function Ie(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Ie=function(M){return typeof M}:Ie=function(M){return M&&typeof Symbol=="function"&&M.constructor===Symbol&&M!==Symbol.prototype?"symbol":typeof M},Ie(V)}function _i(V,_){if(!(V instanceof _))throw new TypeError("Cannot call a class as a function")}function ro(V,_){for(var M=0;M<_.length;M++){var j=_[M];j.enumerable=j.enumerable||!1,j.configurable=!0,"value"in j&&(j.writable=!0),Object.defineProperty(V,j.key,j)}}function Ai(V,_,M){return _&&ro(V.prototype,_),M&&ro(V,M),V}function Ci(V,_){if(typeof _!="function"&&_!==null)throw new TypeError("Super expression must either be null or a function");V.prototype=Object.create(_&&_.prototype,{constructor:{value:V,writable:!0,configurable:!0}}),_&&br(V,_)}function br(V,_){return br=Object.setPrototypeOf||function(j,D){return j.__proto__=D,j},br(V,_)}function Hi(V){var _=Pi();return function(){var j=Wt(V),D;if(_){var Y=Wt(this).constructor;D=Reflect.construct(j,arguments,Y)}else D=j.apply(this,arguments);return ki(this,D)}}function ki(V,_){return _&&(Ie(_)==="object"||typeof _=="function")?_:$i(V)}function $i(V){if(V===void 0)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return V}function Pi(){if(typeof Reflect=="undefined"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],function(){})),!0}catch(V){return!1}}function Wt(V){return Wt=Object.setPrototypeOf?Object.getPrototypeOf:function(M){return M.__proto__||Object.getPrototypeOf(M)},Wt(V)}function vr(V,_){var M="data-clipboard-".concat(V);if(_.hasAttribute(M))return _.getAttribute(M)}var 
Ri=function(V){Ci(M,V);var _=Hi(M);function M(j,D){var Y;return _i(this,M),Y=_.call(this),Y.resolveOptions(D),Y.listenClick(j),Y}return Ai(M,[{key:"resolveOptions",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof D.action=="function"?D.action:this.defaultAction,this.target=typeof D.target=="function"?D.target:this.defaultTarget,this.text=typeof D.text=="function"?D.text:this.defaultText,this.container=Ie(D.container)==="object"?D.container:document.body}},{key:"listenClick",value:function(D){var Y=this;this.listener=c()(D,"click",function(ke){return Y.onClick(ke)})}},{key:"onClick",value:function(D){var Y=D.delegateTarget||D.currentTarget,ke=this.action(Y)||"copy",Ut=ze({action:ke,container:this.container,target:this.target(Y),text:this.text(Y)});this.emit(Ut?"success":"error",{action:ke,text:Ut,trigger:Y,clearSelection:function(){Y&&Y.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(D){return vr("action",D)}},{key:"defaultTarget",value:function(D){var Y=vr("target",D);if(Y)return document.querySelector(Y)}},{key:"defaultText",value:function(D){return vr("text",D)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(D){var Y=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return J(D,Y)}},{key:"cut",value:function(D){return w(D)}},{key:"isSupported",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],Y=typeof D=="string"?[D]:D,ke=!!document.queryCommandSupported;return Y.forEach(function(Ut){ke=ke&&!!document.queryCommandSupported(Ut)}),ke}}]),M}(s()),Ii=Ri},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,p){for(;s&&s.nodeType!==n;){if(typeof 
s.matches=="function"&&s.matches(p))return s;s=s.parentNode}}o.exports=a},438:function(o,n,i){var a=i(828);function s(l,f,u,h,w){var A=c.apply(this,arguments);return l.addEventListener(u,A,w),{destroy:function(){l.removeEventListener(u,A,w)}}}function p(l,f,u,h,w){return typeof l.addEventListener=="function"?s.apply(null,arguments):typeof u=="function"?s.bind(null,document).apply(null,arguments):(typeof l=="string"&&(l=document.querySelectorAll(l)),Array.prototype.map.call(l,function(A){return s(A,f,u,h,w)}))}function c(l,f,u,h){return function(w){w.delegateTarget=a(w.target,f),w.delegateTarget&&h.call(l,w)}}o.exports=p},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(o,n,i){var a=i(879),s=i(438);function p(u,h,w){if(!u&&!h&&!w)throw new Error("Missing required arguments");if(!a.string(h))throw new TypeError("Second argument must be a String");if(!a.fn(w))throw new TypeError("Third argument must be a Function");if(a.node(u))return c(u,h,w);if(a.nodeList(u))return l(u,h,w);if(a.string(u))return f(u,h,w);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(u,h,w){return u.addEventListener(h,w),{destroy:function(){u.removeEventListener(h,w)}}}function l(u,h,w){return Array.prototype.forEach.call(u,function(A){A.addEventListener(h,w)}),{destroy:function(){Array.prototype.forEach.call(u,function(A){A.removeEventListener(h,w)})}}}function f(u,h,w){return s(document.body,u,h,w)}o.exports=p},817:function(o){function n(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else 
if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var p=window.getSelection(),c=document.createRange();c.selectNodeContents(i),p.removeAllRanges(),p.addRange(c),a=p.toString()}return a}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,a,s){var p=this.e||(this.e={});return(p[i]||(p[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var p=this;function c(){p.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),p=0,c=s.length;for(p;p{"use strict";/*! + * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var ts=/["'&<>]/;ei.exports=rs;function rs(e){var t=""+e,r=ts.exec(t);if(!r)return t;var o,n="",i=0,a=0;for(i=r.index;i0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[o++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function N(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var o=r.call(e),n,i=[],a;try{for(;(t===void 0||t-- >0)&&!(n=o.next()).done;)i.push(n.value)}catch(s){a={error:s}}finally{try{n&&!n.done&&(r=o.return)&&r.call(o)}finally{if(a)throw a.error}}return i}function q(e,t,r){if(r||arguments.length===2)for(var o=0,n=t.length,i;o1||s(u,h)})})}function s(u,h){try{p(o[u](h))}catch(w){f(i[0][3],w)}}function p(u){u.value instanceof nt?Promise.resolve(u.value.v).then(c,l):f(i[0][2],u)}function c(u){s("next",u)}function l(u){s("throw",u)}function f(u,h){u(h),i.shift(),i.length&&s(i[0][0],i[0][1])}}function mo(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not 
defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof de=="function"?de(e):e[Symbol.iterator](),r={},o("next"),o("throw"),o("return"),r[Symbol.asyncIterator]=function(){return this},r);function o(i){r[i]=e[i]&&function(a){return new Promise(function(s,p){a=e[i](a),n(s,p,a.done,a.value)})}}function n(i,a,s,p){Promise.resolve(p).then(function(c){i({value:c,done:s})},a)}}function k(e){return typeof e=="function"}function ft(e){var t=function(o){Error.call(o),o.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var zt=ft(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(o,n){return n+1+") "+o.toString()}).join(` + `):"",this.name="UnsubscriptionError",this.errors=r}});function qe(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Fe=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,o,n,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=de(a),p=s.next();!p.done;p=s.next()){var c=p.value;c.remove(this)}}catch(A){t={error:A}}finally{try{p&&!p.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var l=this.initialTeardown;if(k(l))try{l()}catch(A){i=A instanceof zt?A.errors:[A]}var f=this._finalizers;if(f){this._finalizers=null;try{for(var u=de(f),h=u.next();!h.done;h=u.next()){var w=h.value;try{fo(w)}catch(A){i=i!=null?i:[],A instanceof zt?i=q(q([],N(i)),N(A.errors)):i.push(A)}}}catch(A){o={error:A}}finally{try{h&&!h.done&&(n=u.return)&&n.call(u)}finally{if(o)throw o.error}}}if(i)throw new zt(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)fo(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 
0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&qe(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&qe(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Tr=Fe.EMPTY;function qt(e){return e instanceof Fe||e&&"closed"in e&&k(e.remove)&&k(e.add)&&k(e.unsubscribe)}function fo(e){k(e)?e():e.unsubscribe()}var $e={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var ut={setTimeout:function(e,t){for(var r=[],o=2;o0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=this,i=n.hasError,a=n.isStopped,s=n.observers;return i||a?Tr:(this.currentObservers=null,s.push(r),new Fe(function(){o.currentObservers=null,qe(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,a=o.isStopped;n?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new F;return r.source=this,r},t.create=function(r,o){return new Eo(r,o)},t}(F);var Eo=function(e){re(t,e);function t(r,o){var n=e.call(this)||this;return n.destination=r,n.source=o,n}return t.prototype.next=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var 
r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 0:o.subscribe(r))!==null&&n!==void 0?n:Tr},t}(g);var _r=function(e){re(t,e);function t(r){var o=e.call(this)||this;return o._value=r,o}return Object.defineProperty(t.prototype,"value",{get:function(){return this.getValue()},enumerable:!1,configurable:!0}),t.prototype._subscribe=function(r){var o=e.prototype._subscribe.call(this,r);return!o.closed&&r.next(this._value),o},t.prototype.getValue=function(){var r=this,o=r.hasError,n=r.thrownError,i=r._value;if(o)throw n;return this._throwIfClosed(),i},t.prototype.next=function(r){e.prototype.next.call(this,this._value=r)},t}(g);var Lt={now:function(){return(Lt.delegate||Date).now()},delegate:void 0};var _t=function(e){re(t,e);function t(r,o,n){r===void 0&&(r=1/0),o===void 0&&(o=1/0),n===void 0&&(n=Lt);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var o=this,n=o.isStopped,i=o._buffer,a=o._infiniteTimeWindow,s=o._timestampProvider,p=o._windowTime;n||(i.push(r),!a&&i.push(s.now()+p)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,a=n._buffer,s=a.slice(),p=0;p0?e.prototype.schedule.call(this,r,o):(this.delay=o,this.state=r,this.scheduler.flush(this),this)},t.prototype.execute=function(r,o){return o>0||this.closed?e.prototype.execute.call(this,r,o):this._execute(r,o)},t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!=null&&n>0||n==null&&this.delay>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.flush(this),0)},t}(vt);var So=function(e){re(t,e);function t(){return 
e!==null&&e.apply(this,arguments)||this}return t}(gt);var Hr=new So(To);var Oo=function(e){re(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n}return t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!==null&&n>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=bt.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){var i;if(n===void 0&&(n=0),n!=null?n>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);var a=r.actions;o!=null&&((i=a[a.length-1])===null||i===void 0?void 0:i.id)!==o&&(bt.cancelAnimationFrame(o),r._scheduled=void 0)},t}(vt);var Mo=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var o=this._scheduled;this._scheduled=void 0;var n=this.actions,i;r=r||n.shift();do if(i=r.execute(r.state,r.delay))break;while((r=n[0])&&r.id===o&&n.shift());if(this._active=!1,i){for(;(r=n[0])&&r.id===o&&n.shift();)r.unsubscribe();throw i}},t}(gt);var me=new Mo(Oo);var O=new F(function(e){return e.complete()});function Yt(e){return e&&k(e.schedule)}function kr(e){return e[e.length-1]}function Xe(e){return k(kr(e))?e.pop():void 0}function He(e){return Yt(kr(e))?e.pop():void 0}function Bt(e,t){return typeof kr(e)=="number"?e.pop():t}var xt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Gt(e){return k(e==null?void 0:e.then)}function Jt(e){return k(e[ht])}function Xt(e){return Symbol.asyncIterator&&k(e==null?void 0:e[Symbol.asyncIterator])}function Zt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Gi(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var er=Gi();function tr(e){return k(e==null?void 0:e[er])}function rr(e){return lo(this,arguments,function(){var r,o,n,i;return Nt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,nt(r.read())];case 3:return o=a.sent(),n=o.value,i=o.done,i?[4,nt(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,nt(n)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function or(e){return k(e==null?void 0:e.getReader)}function W(e){if(e instanceof F)return e;if(e!=null){if(Jt(e))return Ji(e);if(xt(e))return Xi(e);if(Gt(e))return Zi(e);if(Xt(e))return Lo(e);if(tr(e))return ea(e);if(or(e))return ta(e)}throw Zt(e)}function Ji(e){return new F(function(t){var r=e[ht]();if(k(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Xi(e){return new F(function(t){for(var r=0;r=2;return function(o){return o.pipe(e?b(function(n,i){return e(n,i,o)}):le,Te(1),r?Be(t):zo(function(){return new ir}))}}function Fr(e){return e<=0?function(){return O}:y(function(t,r){var o=[];t.subscribe(T(r,function(n){o.push(n),e=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new g}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,p=s===void 0?!0:s;return function(c){var l,f,u,h=0,w=!1,A=!1,te=function(){f==null||f.unsubscribe(),f=void 0},ie=function(){te(),l=u=void 0,w=A=!1},J=function(){var H=l;ie(),H==null||H.unsubscribe()};return y(function(H,mt){h++,!A&&!w&&te();var ze=u=u!=null?u:r();mt.add(function(){h--,h===0&&!A&&!w&&(f=Wr(J,p))}),ze.subscribe(mt),!l&&h>0&&(l=new at({next:function(Ie){return 
ze.next(Ie)},error:function(Ie){A=!0,te(),f=Wr(ie,n,Ie),ze.error(Ie)},complete:function(){w=!0,te(),f=Wr(ie,a),ze.complete()}}),W(H).subscribe(l))})(c)}}function Wr(e,t){for(var r=[],o=2;oe.next(document)),e}function $(e,t=document){return Array.from(t.querySelectorAll(e))}function P(e,t=document){let r=fe(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function fe(e,t=document){return t.querySelector(e)||void 0}function Re(){var e,t,r,o;return(o=(r=(t=(e=document.activeElement)==null?void 0:e.shadowRoot)==null?void 0:t.activeElement)!=null?r:document.activeElement)!=null?o:void 0}var xa=S(d(document.body,"focusin"),d(document.body,"focusout")).pipe(_e(1),Q(void 0),m(()=>Re()||document.body),G(1));function et(e){return xa.pipe(m(t=>e.contains(t)),K())}function kt(e,t){return C(()=>S(d(e,"mouseenter").pipe(m(()=>!0)),d(e,"mouseleave").pipe(m(()=>!1))).pipe(t?Ht(r=>Me(+!r*t)):le,Q(e.matches(":hover"))))}function Bo(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Bo(e,r)}function x(e,t,...r){let o=document.createElement(e);if(t)for(let n of Object.keys(t))typeof t[n]!="undefined"&&(typeof t[n]!="boolean"?o.setAttribute(n,t[n]):o.setAttribute(n,""));for(let n of r)Bo(o,n);return o}function sr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function wt(e){let t=x("script",{src:e});return C(()=>(document.head.appendChild(t),S(d(t,"load"),d(t,"error").pipe(v(()=>$r(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),L(()=>document.head.removeChild(t)),Te(1))))}var Go=new g,ya=C(()=>typeof ResizeObserver=="undefined"?wt("https://unpkg.com/resize-observer-polyfill"):I(void 0)).pipe(m(()=>new ResizeObserver(e=>e.forEach(t=>Go.next(t)))),v(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),G(1));function 
ce(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){let t=e;for(;t.clientWidth===0&&t.parentElement;)t=t.parentElement;return ya.pipe(E(r=>r.observe(t)),v(r=>Go.pipe(b(o=>o.target===t),L(()=>r.unobserve(t)))),m(()=>ce(e)),Q(ce(e)))}function Tt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function cr(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}function Jo(e){let t=[],r=e.parentElement;for(;r;)(e.clientWidth>r.clientWidth||e.clientHeight>r.clientHeight)&&t.push(r),r=(e=r).parentElement;return t.length===0&&t.push(document.documentElement),t}function Ue(e){return{x:e.offsetLeft,y:e.offsetTop}}function Xo(e){let t=e.getBoundingClientRect();return{x:t.x+window.scrollX,y:t.y+window.scrollY}}function Zo(e){return S(d(window,"load"),d(window,"resize")).pipe(Le(0,me),m(()=>Ue(e)),Q(Ue(e)))}function pr(e){return{x:e.scrollLeft,y:e.scrollTop}}function De(e){return S(d(e,"scroll"),d(window,"scroll"),d(window,"resize")).pipe(Le(0,me),m(()=>pr(e)),Q(pr(e)))}var en=new g,Ea=C(()=>I(new IntersectionObserver(e=>{for(let t of e)en.next(t)},{threshold:0}))).pipe(v(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),G(1));function tt(e){return Ea.pipe(E(t=>t.observe(e)),v(t=>en.pipe(b(({target:r})=>r===e),L(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function tn(e,t=16){return De(e).pipe(m(({y:r})=>{let o=ce(e),n=Tt(e);return r>=n.height-o.height-t}),K())}var lr={drawer:P("[data-md-toggle=drawer]"),search:P("[data-md-toggle=search]")};function rn(e){return lr[e].checked}function Je(e,t){lr[e].checked!==t&&lr[e].click()}function Ve(e){let t=lr[e];return d(t,"change").pipe(m(()=>t.checked),Q(t.checked))}function wa(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Ta(){return 
S(d(window,"compositionstart").pipe(m(()=>!0)),d(window,"compositionend").pipe(m(()=>!1))).pipe(Q(!1))}function on(){let e=d(window,"keydown").pipe(b(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:rn("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),b(({mode:t,type:r})=>{if(t==="global"){let o=Re();if(typeof o!="undefined")return!wa(o,r)}return!0}),pe());return Ta().pipe(v(t=>t?O:e))}function xe(){return new URL(location.href)}function pt(e,t=!1){if(B("navigation.instant")&&!t){let r=x("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function nn(){return new g}function an(){return location.hash.slice(1)}function sn(e){let t=x("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Sa(e){return S(d(window,"hashchange"),e).pipe(m(an),Q(an()),b(t=>t.length>0),G(1))}function cn(e){return Sa(e).pipe(m(t=>fe(`[id="${t}"]`)),b(t=>typeof t!="undefined"))}function $t(e){let t=matchMedia(e);return ar(r=>t.addListener(()=>r(t.matches))).pipe(Q(t.matches))}function pn(){let e=matchMedia("print");return S(d(window,"beforeprint").pipe(m(()=>!0)),d(window,"afterprint").pipe(m(()=>!1))).pipe(Q(e.matches))}function Nr(e,t){return e.pipe(v(r=>r?t():O))}function zr(e,t){return new F(r=>{let o=new XMLHttpRequest;return o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network error"))}),o.addEventListener("abort",()=>{r.complete()}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{var i;if(n.lengthComputable)t.progress$.next(n.loaded/n.total*100);else{let a=(i=o.getResponseHeader("Content-Length"))!=null?i:0;t.progress$.next(n.loaded/+a*100)}}),t.progress$.next(5)),o.send(),()=>o.abort()})}function Ne(e,t){return zr(e,t).pipe(v(r=>r.text()),m(r=>JSON.parse(r)),G(1))}function 
ln(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/html")),G(1))}function mn(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),G(1))}function fn(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function un(){return S(d(window,"scroll",{passive:!0}),d(window,"resize",{passive:!0})).pipe(m(fn),Q(fn()))}function dn(){return{width:innerWidth,height:innerHeight}}function hn(){return d(window,"resize",{passive:!0}).pipe(m(dn),Q(dn()))}function bn(){return z([un(),hn()]).pipe(m(([e,t])=>({offset:e,size:t})),G(1))}function mr(e,{viewport$:t,header$:r}){let o=t.pipe(Z("size")),n=z([o,r]).pipe(m(()=>Ue(e)));return z([r,t,n]).pipe(m(([{height:i},{offset:a,size:s},{x:p,y:c}])=>({offset:{x:a.x-p,y:a.y-c+i},size:s})))}function Oa(e){return d(e,"message",t=>t.data)}function Ma(e){let t=new g;return t.subscribe(r=>e.postMessage(r)),t}function vn(e,t=new Worker(e)){let r=Oa(t),o=Ma(t),n=new g;n.subscribe(o);let i=o.pipe(X(),ne(!0));return n.pipe(X(),Pe(r.pipe(U(i))),pe())}var La=P("#__config"),St=JSON.parse(La.textContent);St.base=`${new URL(St.base,xe())}`;function ye(){return St}function B(e){return St.features.includes(e)}function Ee(e,t){return typeof t!="undefined"?St.translations[e].replace("#",t.toString()):St.translations[e]}function Se(e,t=document){return P(`[data-md-component=${e}]`,t)}function ae(e,t=document){return $(`[data-md-component=${e}]`,t)}function _a(e){let t=P(".md-typeset > :first-child",e);return d(t,"click",{once:!0}).pipe(m(()=>P(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function gn(e){if(!B("announce.dismiss")||!e.childElementCount)return O;if(!e.hidden){let t=P(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return C(()=>{let t=new g;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),_a(e).pipe(E(r=>t.next(r)),L(()=>t.complete()),m(r=>R({ref:e},r)))})}function Aa(e,{target$:t}){return 
t.pipe(m(r=>({hidden:r!==e})))}function xn(e,t){let r=new g;return r.subscribe(({hidden:o})=>{e.hidden=o}),Aa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))}function Pt(e,t){return t==="inline"?x("div",{class:"md-tooltip md-tooltip--inline",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"})):x("div",{class:"md-tooltip",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"}))}function yn(...e){return x("div",{class:"md-tooltip2",role:"tooltip"},x("div",{class:"md-tooltip2__inner md-typeset"},e))}function En(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("a",{href:r,class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}else return x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("span",{class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}function wn(e){return x("button",{class:"md-clipboard md-icon",title:Ee("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}function qr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(p=>!e.terms[p]).reduce((p,c)=>[...p,x("del",null,c)," "],[]).slice(0,-1),i=ye(),a=new URL(e.location,i.base);B("search.highlight")&&a.searchParams.set("h",Object.entries(e.terms).filter(([,p])=>p).reduce((p,[c])=>`${p} ${c}`.trim(),""));let{tags:s}=ye();return x("a",{href:`${a}`,class:"md-search-result__link",tabIndex:-1},x("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&x("div",{class:"md-search-result__icon md-icon"}),r>0&&x("h1",null,e.title),r<=0&&x("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&e.tags.map(p=>{let c=s?p in s?`md-tag-icon md-tag--${s[p]}`:"md-tag-icon":"";return x("span",{class:`md-tag ${c}`},p)}),o>0&&n.length>0&&x("p",{class:"md-search-result__terms"},Ee("search.result.term.missing"),": ",...n)))}function Tn(e){let t=e[0].score,r=[...e],o=ye(),n=r.findIndex(l=>!`${new 
URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),a=r.findIndex(l=>l.scoreqr(l,1)),...p.length?[x("details",{class:"md-search-result__more"},x("summary",{tabIndex:-1},x("div",null,p.length>0&&p.length===1?Ee("search.result.more.one"):Ee("search.result.more.other",p.length))),...p.map(l=>qr(l,1)))]:[]];return x("li",{class:"md-search-result__item"},c)}function Sn(e){return x("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>x("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?sr(r):r)))}function Qr(e){let t=`tabbed-control tabbed-control--${e}`;return x("div",{class:t,hidden:!0},x("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function On(e){return x("div",{class:"md-typeset__scrollwrap"},x("div",{class:"md-typeset__table"},e))}function Ca(e){var o;let t=ye(),r=new URL(`../${e.version}/`,t.base);return x("li",{class:"md-version__item"},x("a",{href:`${r}`,class:"md-version__link"},e.title,((o=t.version)==null?void 0:o.alias)&&e.aliases.length>0&&x("span",{class:"md-version__alias"},e.aliases[0])))}function Mn(e,t){var o;let r=ye();return e=e.filter(n=>{var i;return!((i=n.properties)!=null&&i.hidden)}),x("div",{class:"md-version"},x("button",{class:"md-version__current","aria-label":Ee("select.version")},t.title,((o=r.version)==null?void 0:o.alias)&&t.aliases.length>0&&x("span",{class:"md-version__alias"},t.aliases[0])),x("ul",{class:"md-version__list"},e.map(Ca)))}var Ha=0;function ka(e){let t=z([et(e),kt(e)]).pipe(m(([o,n])=>o||n),K()),r=C(()=>Jo(e)).pipe(oe(De),ct(1),m(()=>Xo(e)));return t.pipe(Ae(o=>o),v(()=>z([t,r])),m(([o,n])=>({active:o,offset:n})),pe())}function $a(e,t){let{content$:r,viewport$:o}=t,n=`__tooltip2_${Ha++}`;return C(()=>{let i=new g,a=new _r(!1);i.pipe(X(),ne(!1)).subscribe(a);let s=a.pipe(Ht(c=>Me(+!c*250,Hr)),K(),v(c=>c?r:O),E(c=>c.id=n),pe());z([i.pipe(m(({active:c})=>c)),s.pipe(v(c=>kt(c,250)),Q(!1))]).pipe(m(c=>c.some(l=>l))).subscribe(a);let 
p=a.pipe(b(c=>c),ee(s,o),m(([c,l,{size:f}])=>{let u=e.getBoundingClientRect(),h=u.width/2;if(l.role==="tooltip")return{x:h,y:8+u.height};if(u.y>=f.height/2){let{height:w}=ce(l);return{x:h,y:-16-w}}else return{x:h,y:16+u.height}}));return z([s,i,p]).subscribe(([c,{offset:l},f])=>{c.style.setProperty("--md-tooltip-host-x",`${l.x}px`),c.style.setProperty("--md-tooltip-host-y",`${l.y}px`),c.style.setProperty("--md-tooltip-x",`${f.x}px`),c.style.setProperty("--md-tooltip-y",`${f.y}px`),c.classList.toggle("md-tooltip2--top",f.y<0),c.classList.toggle("md-tooltip2--bottom",f.y>=0)}),a.pipe(b(c=>c),ee(s,(c,l)=>l),b(c=>c.role==="tooltip")).subscribe(c=>{let l=ce(P(":scope > *",c));c.style.setProperty("--md-tooltip-width",`${l.width}px`),c.style.setProperty("--md-tooltip-tail","0px")}),a.pipe(K(),be(me),ee(s)).subscribe(([c,l])=>{l.classList.toggle("md-tooltip2--active",c)}),z([a.pipe(b(c=>c)),s]).subscribe(([c,l])=>{l.role==="dialog"?(e.setAttribute("aria-controls",n),e.setAttribute("aria-haspopup","dialog")):e.setAttribute("aria-describedby",n)}),a.pipe(b(c=>!c)).subscribe(()=>{e.removeAttribute("aria-controls"),e.removeAttribute("aria-describedby"),e.removeAttribute("aria-haspopup")}),ka(e).pipe(E(c=>i.next(c)),L(()=>i.complete()),m(c=>R({ref:e},c)))})}function lt(e,{viewport$:t},r=document.body){return $a(e,{content$:new F(o=>{let n=e.title,i=yn(n);return o.next(i),e.removeAttribute("title"),r.append(i),()=>{i.remove(),e.setAttribute("title",n)}}),viewport$:t})}function Pa(e,t){let r=C(()=>z([Zo(e),De(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:a,height:s}=ce(e);return{x:o-i.x+a/2,y:n-i.y+s/2}}));return et(e).pipe(v(o=>r.pipe(m(n=>({active:o,offset:n})),Te(+!o||1/0))))}function Ln(e,t,{target$:r}){let[o,n]=Array.from(e.children);return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));return 
i.subscribe({next({offset:s}){e.style.setProperty("--md-tooltip-x",`${s.x}px`),e.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),tt(e).pipe(U(a)).subscribe(s=>{e.toggleAttribute("data-md-visible",s)}),S(i.pipe(b(({active:s})=>s)),i.pipe(_e(250),b(({active:s})=>!s))).subscribe({next({active:s}){s?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Le(16,me)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(ct(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?e.style.setProperty("--md-tooltip-0",`${-s}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),d(n,"click").pipe(U(a),b(s=>!(s.metaKey||s.ctrlKey))).subscribe(s=>{s.stopPropagation(),s.preventDefault()}),d(n,"mousedown").pipe(U(a),ee(i)).subscribe(([s,{active:p}])=>{var c;if(s.button!==0||s.metaKey||s.ctrlKey)s.preventDefault();else if(p){s.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(c=Re())==null||c.blur()}}),r.pipe(U(a),b(s=>s===o),Ge(125)).subscribe(()=>e.focus()),Pa(e,t).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function Ra(e){return e.tagName==="CODE"?$(".c, .c1, .cm",e):[e]}function Ia(e){let t=[];for(let r of Ra(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let a;for(;a=/(\(\d+\))(!)?/.exec(i.textContent);){let[,s,p]=a;if(typeof p=="undefined"){let c=i.splitText(a.index);i=c.splitText(s.length),t.push(c)}else{i.textContent=s,t.push(i);break}}}}return t}function _n(e,t){t.append(...Array.from(e.childNodes))}function fr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,a=new Map;for(let s of Ia(t)){let[,p]=s.textContent.match(/\((\d+)\)/);fe(`:scope > 
li:nth-child(${p})`,e)&&(a.set(p,En(p,i)),s.replaceWith(a.get(p)))}return a.size===0?O:C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=[];for(let[l,f]of a)c.push([P(".md-typeset",f),P(`:scope > li:nth-child(${l})`,e)]);return o.pipe(U(p)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of c)l?_n(f,u):_n(u,f)}),S(...[...a].map(([,l])=>Ln(l,t,{target$:r}))).pipe(L(()=>s.complete()),pe())})}function An(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return An(t)}}function Cn(e,t){return C(()=>{let r=An(e);return typeof r!="undefined"?fr(r,e,t):O})}var Hn=Vt(Yr());var Fa=0;function kn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return kn(t)}}function ja(e){return ge(e).pipe(m(({width:t})=>({scrollable:Tt(e).width>t})),Z("scrollable"))}function $n(e,t){let{matches:r}=matchMedia("(hover)"),o=C(()=>{let n=new g,i=n.pipe(Fr(1));n.subscribe(({scrollable:c})=>{c&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")});let a=[];if(Hn.default.isSupported()&&(e.closest(".copy")||B("content.code.copy")&&!e.closest(".no-copy"))){let c=e.closest("pre");c.id=`__code_${Fa++}`;let l=wn(c.id);c.insertBefore(l,e),B("content.tooltips")&&a.push(lt(l,{viewport$}))}let s=e.closest(".highlight");if(s instanceof HTMLElement){let c=kn(s);if(typeof c!="undefined"&&(s.classList.contains("annotate")||B("content.code.annotate"))){let l=fr(c,e,t);a.push(ge(s).pipe(U(i),m(({width:f,height:u})=>f&&u),K(),v(f=>f?l:O)))}}return $(":scope > span[id]",e).length&&e.classList.add("md-code__content"),ja(e).pipe(E(c=>n.next(c)),L(()=>n.complete()),m(c=>R({ref:e},c)),Pe(...a))});return B("content.lazy")?tt(e).pipe(b(n=>n),Te(1),v(()=>o)):o}function Wa(e,{target$:t,print$:r}){let o=!0;return 
S(t.pipe(m(n=>n.closest("details:not([open])")),b(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(b(n=>n||!o),E(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function Pn(e,t){return C(()=>{let r=new g;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),Wa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}var Rn=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel rect,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel rect{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g #flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node 
.divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}a .nodeLabel{text-decoration:underline}.node circle.state-end,.node circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs 
#statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead 
path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var Br,Da=0;function Va(){return typeof mermaid=="undefined"||mermaid instanceof Element?wt("https://unpkg.com/mermaid@10/dist/mermaid.min.js"):I(void 0)}function In(e){return e.classList.remove("mermaid"),Br||(Br=Va().pipe(E(()=>mermaid.initialize({startOnLoad:!1,themeCSS:Rn,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),G(1))),Br.subscribe(()=>ao(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${Da++}`,r=x("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),a=r.attachShadow({mode:"closed"});a.innerHTML=n,e.replaceWith(r),i==null||i(a)})),Br.pipe(m(()=>({ref:e})))}var Fn=x("table");function jn(e){return e.replaceWith(Fn),Fn.replaceWith(On(e)),I({ref:e})}function Na(e){let t=e.find(r=>r.checked)||e[0];return S(...e.map(r=>d(r,"change").pipe(m(()=>P(`label[for="${r.id}"]`))))).pipe(Q(P(`label[for="${t.id}"]`)),m(r=>({active:r})))}function Wn(e,{viewport$:t,target$:r}){let o=P(".tabbed-labels",e),n=$(":scope > input",e),i=Qr("prev");e.append(i);let a=Qr("next");return e.append(a),C(()=>{let s=new g,p=s.pipe(X(),ne(!0));z([s,ge(e),tt(e)]).pipe(U(p),Le(1,me)).subscribe({next([{active:c},l]){let f=Ue(c),{width:u}=ce(c);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let 
h=pr(o);(f.xh.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),z([De(o),ge(o)]).pipe(U(p)).subscribe(([c,l])=>{let f=Tt(o);i.hidden=c.x<16,a.hidden=c.x>f.width-l.width-16}),S(d(i,"click").pipe(m(()=>-1)),d(a,"click").pipe(m(()=>1))).pipe(U(p)).subscribe(c=>{let{width:l}=ce(o);o.scrollBy({left:l*c,behavior:"smooth"})}),r.pipe(U(p),b(c=>n.includes(c))).subscribe(c=>c.click()),o.classList.add("tabbed-labels--linked");for(let c of n){let l=P(`label[for="${c.id}"]`);l.replaceChildren(x("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),d(l.firstElementChild,"click").pipe(U(p),b(f=>!(f.metaKey||f.ctrlKey)),E(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return B("content.tabs.link")&&s.pipe(Ce(1),ee(t)).subscribe(([{active:c},{offset:l}])=>{let f=c.innerText.trim();if(c.hasAttribute("data-md-switching"))c.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let w of $("[data-tabs]"))for(let A of $(":scope > input",w)){let te=P(`label[for="${A.id}"]`);if(te!==c&&te.innerText.trim()===f){te.setAttribute("data-md-switching",""),A.click();break}}window.scrollTo({top:e.offsetTop-u});let h=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...h])])}}),s.pipe(U(p)).subscribe(()=>{for(let c of $("audio, video",e))c.pause()}),Na(n).pipe(E(c=>s.next(c)),L(()=>s.complete()),m(c=>R({ref:e},c)))}).pipe(Qe(se))}function Un(e,{viewport$:t,target$:r,print$:o}){return S(...$(".annotate:not(.highlight)",e).map(n=>Cn(n,{target$:r,print$:o})),...$("pre:not(.mermaid) > 
code",e).map(n=>$n(n,{target$:r,print$:o})),...$("pre.mermaid",e).map(n=>In(n)),...$("table:not([class])",e).map(n=>jn(n)),...$("details",e).map(n=>Pn(n,{target$:r,print$:o})),...$("[data-tabs]",e).map(n=>Wn(n,{viewport$:t,target$:r})),...$("[title]",e).filter(()=>B("content.tooltips")).map(n=>lt(n,{viewport$:t})))}function za(e,{alert$:t}){return t.pipe(v(r=>S(I(!0),I(!1).pipe(Ge(2e3))).pipe(m(o=>({message:r,active:o})))))}function Dn(e,t){let r=P(".md-typeset",e);return C(()=>{let o=new g;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),za(e,t).pipe(E(n=>o.next(n)),L(()=>o.complete()),m(n=>R({ref:e},n)))})}var qa=0;function Qa(e,t){document.body.append(e);let{width:r}=ce(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=cr(t),n=typeof o!="undefined"?De(o):I({x:0,y:0}),i=S(et(t),kt(t)).pipe(K());return z([i,n]).pipe(m(([a,s])=>{let{x:p,y:c}=Ue(t),l=ce(t),f=t.closest("table");return f&&t.parentElement&&(p+=f.offsetLeft+t.parentElement.offsetLeft,c+=f.offsetTop+t.parentElement.offsetTop),{active:a,offset:{x:p-s.x+l.width/2-r/2,y:c-s.y+l.height+8}}}))}function Vn(e){let t=e.title;if(!t.length)return O;let r=`__tooltip_${qa++}`,o=Pt(r,"inline"),n=P(".md-typeset",o);return n.innerHTML=t,C(()=>{let i=new g;return 
i.subscribe({next({offset:a}){o.style.setProperty("--md-tooltip-x",`${a.x}px`),o.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),S(i.pipe(b(({active:a})=>a)),i.pipe(_e(250),b(({active:a})=>!a))).subscribe({next({active:a}){a?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Le(16,me)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(ct(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?o.style.setProperty("--md-tooltip-0",`${-a}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Qa(o,e).pipe(E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))}).pipe(Qe(se))}function Ka({viewport$:e}){if(!B("header.autohide"))return I(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Ye(2,1),m(([n,i])=>[nMath.abs(i-n.y)>100),m(([,[n]])=>n),K()),o=Ve("search");return z([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),K(),v(n=>n?r:I(!1)),Q(!1))}function Nn(e,t){return C(()=>z([ge(e),Ka(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),K((r,o)=>r.height===o.height&&r.hidden===o.hidden),G(1))}function zn(e,{header$:t,main$:r}){return C(()=>{let o=new g,n=o.pipe(X(),ne(!0));o.pipe(Z("active"),We(t)).subscribe(([{active:a},{hidden:s}])=>{e.classList.toggle("md-header--shadow",a&&!s),e.hidden=s});let i=ue($("[title]",e)).pipe(b(()=>B("content.tooltips")),oe(a=>Vn(a)));return r.subscribe(o),t.pipe(U(n),m(a=>R({ref:e},a)),Pe(i.pipe(U(n))))})}function Ya(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=ce(e);return{active:o>=n}}),Z("active"))}function qn(e,t){return C(()=>{let r=new 
g;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=fe(".md-content h1");return typeof o=="undefined"?O:Ya(o,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))})}function Qn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),K()),n=o.pipe(v(()=>ge(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),Z("bottom"))));return z([o,n,t]).pipe(m(([i,{top:a,bottom:s},{offset:{y:p},size:{height:c}}])=>(c=Math.max(0,c-Math.max(0,a-p,i)-Math.max(0,c+p-s)),{offset:a-i,height:c,active:a-i<=p})),K((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function Ba(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return I(...e).pipe(oe(o=>d(o,"change").pipe(m(()=>o))),Q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),G(1))}function Kn(e){let t=$("input",e),r=x("meta",{name:"theme-color"});document.head.appendChild(r);let o=x("meta",{name:"color-scheme"});document.head.appendChild(o);let n=$t("(prefers-color-scheme: light)");return C(()=>{let i=new g;return i.subscribe(a=>{if(document.body.setAttribute("data-md-color-switching",""),a.color.media==="(prefers-color-scheme)"){let s=matchMedia("(prefers-color-scheme: light)"),p=document.querySelector(s.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");a.color.scheme=p.getAttribute("data-md-color-scheme"),a.color.primary=p.getAttribute("data-md-color-primary"),a.color.accent=p.getAttribute("data-md-color-accent")}for(let[s,p]of Object.entries(a.color))document.body.setAttribute(`data-md-color-${s}`,p);for(let 
s=0;sa.key==="Enter"),ee(i,(a,s)=>s)).subscribe(({index:a})=>{a=(a+1)%t.length,t[a].click(),t[a].focus()}),i.pipe(m(()=>{let a=Se("header"),s=window.getComputedStyle(a);return o.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(p=>(+p).toString(16).padStart(2,"0")).join("")})).subscribe(a=>r.content=`#${a}`),i.pipe(be(se)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),Ba(t).pipe(U(n.pipe(Ce(1))),st(),E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))})}function Yn(e,{progress$:t}){return C(()=>{let r=new g;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(E(o=>r.next({value:o})),L(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Gr=Vt(Yr());function Ga(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Bn({alert$:e}){Gr.default.isSupported()&&new F(t=>{new Gr.default("[data-clipboard-target], [data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||Ga(P(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(E(t=>{t.trigger.focus()}),m(()=>Ee("clipboard.copied"))).subscribe(e)}function Gn(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function Ja(e,t){let r=new Map;for(let o of $("url",e)){let n=P("loc",o),i=[Gn(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let a of $("[rel=alternate]",o)){let s=a.getAttribute("href");s!=null&&i.push(Gn(new URL(s),t))}}return r}function ur(e){return mn(new URL("sitemap.xml",e)).pipe(m(t=>Ja(t,new URL(e))),ve(()=>I(new Map)))}function Xa(e,t){if(!(e.target instanceof Element))return O;let r=e.target.closest("a");if(r===null)return O;if(r.target||e.metaKey||e.ctrlKey)return O;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),I(new URL(r.href))):O}function Jn(e){let t=new Map;for(let r of $(":scope > *",e.head))t.set(r.outerHTML,r);return 
t}function Xn(e){for(let t of $("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return I(e)}function Za(e){for(let o of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...B("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=fe(o),i=fe(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=Jn(document);for(let[o,n]of Jn(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Se("container");return je($("script",r)).pipe(v(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new F(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),O}),X(),ne(document))}function Zn({location$:e,viewport$:t,progress$:r}){let o=ye();if(location.protocol==="file:")return O;let n=ur(o.base);I(document).subscribe(Xn);let i=d(document.body,"click").pipe(We(n),v(([p,c])=>Xa(p,c)),pe()),a=d(window,"popstate").pipe(m(xe),pe());i.pipe(ee(t)).subscribe(([p,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",p)}),S(i,a).subscribe(e);let s=e.pipe(Z("pathname"),v(p=>ln(p,{progress$:r}).pipe(ve(()=>(pt(p,!0),O)))),v(Xn),v(Za),pe());return S(s.pipe(ee(e,(p,c)=>c)),s.pipe(v(()=>e),Z("pathname"),v(()=>e),Z("hash")),e.pipe(K((p,c)=>p.pathname===c.pathname&&p.hash===c.hash),v(()=>i),E(()=>history.back()))).subscribe(p=>{var c,l;history.state!==null||!p.hash?window.scrollTo(0,(l=(c=history.state)==null?void 
0:c.y)!=null?l:0):(history.scrollRestoration="auto",sn(p.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),d(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(Z("offset"),_e(100)).subscribe(({offset:p})=>{history.replaceState(p,"")}),s}var ri=Vt(ti());function oi(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}${a}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,ri.default)(a).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function It(e){return e.type===1}function dr(e){return e.type===3}function ni(e,t){let r=vn(e);return S(I(location.protocol!=="file:"),Ve("search")).pipe(Ae(o=>o),v(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:B("search.suggest")}}})),r}function ii({document$:e}){let t=ye(),r=Ne(new URL("../versions.json",t.base)).pipe(ve(()=>O)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),v(n=>d(document.body,"click").pipe(b(i=>!i.metaKey&&!i.ctrlKey),ee(o),v(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?O:(i.preventDefault(),I(p))}}return O}),v(i=>ur(new URL(i)).pipe(m(a=>{let p=xe().href.replace(t.base,i);return a.has(p.split("#")[0])?new URL(p):new URL(i)})))))).subscribe(n=>pt(n,!0)),z([r,o]).subscribe(([n,i])=>{P(".md-header__topic").appendChild(Mn(n,i))}),e.pipe(v(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p 
of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function ns(e,{worker$:t}){let{searchParams:r}=xe();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),Ve("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=xe();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=S(t.pipe(Ae(It)),d(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),G(1))}function ai(e,{worker$:t}){let r=new g,o=r.pipe(X(),ne(!0));z([t.pipe(Ae(It)),r],(i,a)=>a).pipe(Z("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(Z("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),d(e.form,"reset").pipe(U(o)).subscribe(()=>e.focus());let n=P("header [for=__search]");return d(n,"click").subscribe(()=>e.focus()),ns(e,{worker$:t}).pipe(E(i=>r.next(i)),L(()=>r.complete()),m(i=>R({ref:e},i)),G(1))}function si(e,{worker$:t,query$:r}){let o=new g,n=tn(e.parentElement).pipe(b(Boolean)),i=e.parentElement,a=P(":scope > :first-child",e),s=P(":scope > :last-child",e);Ve("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(ee(r),Ur(t.pipe(Ae(It)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(E(()=>s.innerHTML=""),v(({items:l})=>S(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Ye(4),Vr(n),v(([f])=>f)))),m(Tn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(oe(l=>{let f=fe("details",l);return typeof f=="undefined"?O:d(f,"toggle").pipe(U(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(E(l=>o.next(l)),L(()=>o.complete()),m(l=>R({ref:e},l)))}function 
is(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=xe();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function ci(e,t){let r=new g,o=r.pipe(X(),ne(!0));return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),d(e,"click").pipe(U(o)).subscribe(n=>n.preventDefault()),is(e,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))}function pi(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=S(d(n,"keydown"),d(n,"focus")).pipe(be(se),m(()=>n.value),K());return o.pipe(We(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(E(s=>o.next(s)),L(()=>o.complete()),m(()=>({ref:e})))}function li(e,{index$:t,keyboard$:r}){let o=ye();try{let n=ni(o.search,t),i=Se("search-query",e),a=Se("search-result",e);d(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Re();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of $(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,h])=>h-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...$(":not(details) > [href], summary, details[open] 
[href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Re()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=ai(i,{worker$:n});return S(s,si(a,{worker$:n,query$:s})).pipe(Pe(...ae("search-share",e).map(p=>ci(p,{query$:s})),...ae("search-suggest",e).map(p=>pi(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ke}}function mi(e,{index$:t,location$:r}){return z([t,r.pipe(Q(xe()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>oi(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function as(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Jr(e,o){var n=o,{header$:t}=n,r=io(n,["header$"]);let i=P(".md-sidebar__scrollwrap",e),{y:a}=Ue(i);return C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=s.pipe(Le(0,me));return c.pipe(ee(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of $(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2})}}}),ue($("label[tabindex]",e)).pipe(oe(l=>d(l,"click").pipe(be(se),m(()=>l),U(p)))).subscribe(l=>{let 
f=P(`[id="${l.htmlFor}"]`);P(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),as(e,r).pipe(E(l=>s.next(l)),L(()=>s.complete()),m(l=>R({ref:e},l)))})}function fi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return Ct(Ne(`${r}/releases/latest`).pipe(ve(()=>O),m(o=>({version:o.tag_name})),Be({})),Ne(r).pipe(ve(()=>O),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),Be({}))).pipe(m(([o,n])=>R(R({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return Ne(r).pipe(m(o=>({repositories:o.public_repos})),Be({}))}}function ui(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return Ne(r).pipe(ve(()=>O),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),Be({}))}function di(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return fi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return ui(r,o)}return O}var ss;function cs(e){return ss||(ss=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return O}return di(e.href).pipe(E(o=>__md_set("__source",o,sessionStorage)))}).pipe(ve(()=>O),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),G(1)))}function hi(e){let t=P(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(Sn(o)),t.classList.add("md-source__repository--active")}),cs(e).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ps(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),Z("hidden"))}function bi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(B("navigation.tabs.sticky")?I({hidden:!1}):ps(e,t)).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ls(e,{viewport$:t,header$:r}){let o=new Map,n=$(".md-nav__link",e);for(let s of 
n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(Z("height"),m(({height:s})=>{let p=Se("main"),c=P(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(Z("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let h=f.offsetParent;for(;h;h=h.offsetParent)u+=h.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),We(i),v(([p,c])=>t.pipe(jr(([l,f],{offset:{y:u},size:h})=>{let w=u+h.height>=Math.floor(s.height);for(;f.length;){let[,A]=f[0];if(A-c=u&&!w)f=[l.pop(),...f];else break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Ye(2,1),m(([s,p])=>s.prev.length{let i=new g,a=i.pipe(X(),ne(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),B("toc.follow")){let s=S(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),We(o.pipe(be(se))),ee(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2,behavior:c})}}})}return B("navigation.tracking")&&t.pipe(U(a),Z("offset"),_e(250),Ce(1),U(n.pipe(Ce(1))),st({delay:250}),ee(i)).subscribe(([,{prev:s}])=>{let p=xe(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else 
p.hash="",history.replaceState({},"",`${p}`)}),ls(e,{viewport$:t,header$:r}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function ms(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Ye(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),U(o.pipe(Ce(1))),ne(!0),st({delay:250}),m(a=>({hidden:a})))}function gi(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(U(a),Z("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),d(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),ms(e,{viewport$:t,main$:o,target$:n}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))}function xi({document$:e,viewport$:t}){e.pipe(v(()=>$(".md-ellipsis")),oe(r=>tt(r).pipe(U(e.pipe(Ce(1))),b(o=>o),m(()=>r),Te(1))),b(r=>r.offsetWidth{let o=r.innerText,n=r.closest("a")||r;return n.title=o,B("content.tooltips")?lt(n,{viewport$:t}).pipe(U(e.pipe(Ce(1))),L(()=>n.removeAttribute("title"))):O})).subscribe(),B("content.tooltips")&&e.pipe(v(()=>$(".md-status")),oe(r=>lt(r,{viewport$:t}))).subscribe()}function yi({document$:e,tablet$:t}){e.pipe(v(()=>$(".md-toggle--indeterminate")),E(r=>{r.indeterminate=!0,r.checked=!1}),oe(r=>d(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),ee(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function fs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Ei({document$:e}){e.pipe(v(()=>$("[data-md-scrollfix]")),E(t=>t.removeAttribute("data-md-scrollfix")),b(fs),oe(t=>d(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function 
wi({viewport$:e,tablet$:t}){z([Ve("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),ee(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function us(){return location.protocol==="file:"?wt(`${new URL("search/search_index.js",Xr.base)}`).pipe(m(()=>__index),G(1)):Ne(new URL("search/search_index.json",Xr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Yo(),jt=nn(),Ot=cn(jt),Zr=on(),Oe=bn(),hr=$t("(min-width: 960px)"),Si=$t("(min-width: 1220px)"),Oi=pn(),Xr=ye(),Mi=document.forms.namedItem("search")?us():Ke,eo=new g;Bn({alert$:eo});var to=new g;B("navigation.instant")&&Zn({location$:jt,viewport$:Oe,progress$:to}).subscribe(ot);var Ti;((Ti=Xr.version)==null?void 0:Ti.provider)==="mike"&&ii({document$:ot});S(jt,Ot).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});Zr.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof 
t!="undefined"&&pt(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&&pt(r);break;case"Enter":let o=Re();o instanceof HTMLLabelElement&&o.click()}});xi({viewport$:Oe,document$:ot});yi({document$:ot,tablet$:hr});Ei({document$:ot});wi({viewport$:Oe,tablet$:hr});var rt=Nn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),v(e=>Qn(e,{viewport$:Oe,header$:rt})),G(1)),ds=S(...ae("consent").map(e=>xn(e,{target$:Ot})),...ae("dialog").map(e=>Dn(e,{alert$:eo})),...ae("header").map(e=>zn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("palette").map(e=>Kn(e)),...ae("progress").map(e=>Yn(e,{progress$:to})),...ae("search").map(e=>li(e,{index$:Mi,keyboard$:Zr})),...ae("source").map(e=>hi(e))),hs=C(()=>S(...ae("announce").map(e=>gn(e)),...ae("content").map(e=>Un(e,{viewport$:Oe,target$:Ot,print$:Oi})),...ae("content").map(e=>B("search.highlight")?mi(e,{index$:Mi,location$:jt}):O),...ae("header-title").map(e=>qn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Si,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>bi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>vi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})),...ae("top").map(e=>gi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})))),Li=ot.pipe(v(()=>hs),Pe(ds),G(1));Li.subscribe();window.document$=ot;window.location$=jt;window.target$=Ot;window.keyboard$=Zr;window.viewport$=Oe;window.tablet$=hr;window.screen$=Si;window.print$=Oi;window.alert$=eo;window.progress$=to;window.component$=Li;})(); +//# sourceMappingURL=bundle.fe8b6f2b.min.js.map + diff --git a/v1.7.4/assets/javascripts/bundle.fe8b6f2b.min.js.map b/v1.7.4/assets/javascripts/bundle.fe8b6f2b.min.js.map new file mode 100644 index 000000000..82635852a --- /dev/null +++ b/v1.7.4/assets/javascripts/bundle.fe8b6f2b.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": 
["node_modules/focus-visible/dist/focus-visible.js", "node_modules/clipboard/dist/clipboard.js", "node_modules/escape-html/index.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/rxjs/node_modules/tslib/tslib.es6.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", 
"node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", 
"node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", 
"node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", 
"src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", 
"src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", 
"src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n 
/**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. 
Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n 
document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. 
For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. 
So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = 
factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box 
model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. 
https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. 
Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? 
\"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if 
(self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} 
useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || 
exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener 
to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName === 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from 
https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) 
{\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = 
index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? html + str.substring(lastIndex, index)\n : html;\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n 
mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * ------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? 
fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n 
/* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n .map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? 
at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/*! *****************************************************************************\r\nCopyright (c) Microsoft Corporation.\r\n\r\nPermission to use, copy, modify, and/or distribute this software for any\r\npurpose with or without fee is hereby granted.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\r\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\r\nAND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\r\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\r\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\r\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\r\nPERFORMANCE OF THIS SOFTWARE.\r\n***************************************************************************** */\r\n/* global Reflect, Promise */\r\n\r\nvar extendStatics = function(d, b) {\r\n extendStatics = Object.setPrototypeOf ||\r\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\r\n return extendStatics(d, b);\r\n};\r\n\r\nexport function __extends(d, b) {\r\n if (typeof b !== \"function\" && b !== null)\r\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\r\n extendStatics(d, b);\r\n function __() { this.constructor = d; }\r\n d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __());\r\n}\r\n\r\nexport var __assign = function() {\r\n __assign = Object.assign || function __assign(t) {\r\n for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n s = arguments[i];\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n }\r\n return t;\r\n }\r\n return __assign.apply(this, arguments);\r\n}\r\n\r\nexport function __rest(s, e) {\r\n var t = {};\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n t[p] = s[p];\r\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n t[p[i]] = s[p[i]];\r\n }\r\n return t;\r\n}\r\n\r\nexport function __decorate(decorators, target, key, desc) {\r\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\r\n return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n}\r\n\r\nexport function __param(paramIndex, decorator) {\r\n return function (target, key) { decorator(target, key, paramIndex); }\r\n}\r\n\r\nexport function __metadata(metadataKey, metadataValue) {\r\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n}\r\n\r\nexport function __awaiter(thisArg, _arguments, P, generator) {\r\n function adopt(value) { return value instanceof P ? 
value : new P(function (resolve) { resolve(value); }); }\r\n return new (P || (P = Promise))(function (resolve, reject) {\r\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n });\r\n}\r\n\r\nexport function __generator(thisArg, body) {\r\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n function verb(n) { return function (v) { return step([n, v]); }; }\r\n function step(op) {\r\n if (f) throw new TypeError(\"Generator is already executing.\");\r\n while (_) try {\r\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? 
y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n if (y = 0, t) op = [op[0] & 2, t.value];\r\n switch (op[0]) {\r\n case 0: case 1: t = op; break;\r\n case 4: _.label++; return { value: op[1], done: false };\r\n case 5: _.label++; y = op[1]; op = [0]; continue;\r\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n default:\r\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n if (t[2]) _.ops.pop();\r\n _.trys.pop(); continue;\r\n }\r\n op = body.call(thisArg, _);\r\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n }\r\n}\r\n\r\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });\r\n}) : (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n o[k2] = m[k];\r\n});\r\n\r\nexport function __exportStar(m, o) {\r\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\r\n}\r\n\r\nexport function __values(o) {\r\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n if (m) return m.call(o);\r\n if (o && typeof o.length === \"number\") return {\r\n next: function () {\r\n if (o && i >= o.length) o = void 0;\r\n return { value: o && o[i++], done: !o };\r\n }\r\n };\r\n throw new TypeError(s ? 
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n}\r\n\r\nexport function __read(o, n) {\r\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n if (!m) return o;\r\n var i = m.call(o), r, ar = [], e;\r\n try {\r\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n }\r\n catch (error) { e = { error: error }; }\r\n finally {\r\n try {\r\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n }\r\n finally { if (e) throw e.error; }\r\n }\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spread() {\r\n for (var ar = [], i = 0; i < arguments.length; i++)\r\n ar = ar.concat(__read(arguments[i]));\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spreadArrays() {\r\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n r[k] = a[j];\r\n return r;\r\n}\r\n\r\nexport function __spreadArray(to, from, pack) {\r\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\r\n if (ar || !(i in from)) {\r\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\r\n ar[i] = from[i];\r\n }\r\n }\r\n return to.concat(ar || Array.prototype.slice.call(from));\r\n}\r\n\r\nexport function __await(v) {\r\n return this instanceof __await ? 
(this.v = v, this) : new __await(v);\r\n}\r\n\r\nexport function __asyncGenerator(thisArg, _arguments, generator) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n function fulfill(value) { resume(\"next\", value); }\r\n function reject(value) { resume(\"throw\", value); }\r\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n}\r\n\r\nexport function __asyncDelegator(o) {\r\n var i, p;\r\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n}\r\n\r\nexport function __asyncValues(o) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var m = o[Symbol.asyncIterator], i;\r\n return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n}\r\n\r\nexport function __makeTemplateObject(cooked, raw) {\r\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n return cooked;\r\n};\r\n\r\nvar __setModuleDefault = Object.create ? (function(o, v) {\r\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\r\n}) : function(o, v) {\r\n o[\"default\"] = v;\r\n};\r\n\r\nexport function __importStar(mod) {\r\n if (mod && mod.__esModule) return mod;\r\n var result = {};\r\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\r\n __setModuleDefault(result, mod);\r\n return result;\r\n}\r\n\r\nexport function __importDefault(mod) {\r\n return (mod && mod.__esModule) ? mod : { default: mod };\r\n}\r\n\r\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\r\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? 
f.value : state.get(receiver);\r\n}\r\n\r\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\r\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\r\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\r\n}\r\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. 
Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. 
A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. 
Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? []).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? 
[_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. 
It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. 
This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. 
Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { 
timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. 
Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { 
reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. 
Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | 
null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". 
Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): 
UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. 
This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. 
See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). 
In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. 
Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * 
}\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. 
To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: 
OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? 
Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. 
Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. 
This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. (DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? 
function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. 
Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected 
_checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject\n */\nexport class BehaviorSubject extends Subject {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. 
When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. `ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. 
This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 
1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. 
May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is 
not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? 
this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n * @return {any}\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false && this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... 
*/\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. 
Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return {number} A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed an stored as an Action object in a\n * queue of actions.\n *\n * @param {function(state: ?T): ?Subscription} work A function representing a\n * task, or some unit of work to be executed by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler itself.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @return {Subscription} A subscription in order to be able to unsubscribe\n * the scheduled work.\n */\n public schedule(work: (this: SchedulerAction, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor(this, work).schedule(state, delay);\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array> = [];\n /**\n * A flag to indicate whether the 
Scheduler is currently executing a batch of\n * queued actions.\n * @type {boolean}\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @type {any}\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. 
It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction extends AsyncAction {\n constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (delay > 0) {\n return super.schedule(state, delay);\n }\n this.delay = delay;\n this.state = state;\n this.scheduler.flush(this);\n return this;\n }\n\n public execute(state: T, delay: number): any {\n return delay > 0 || this.closed ? 
super.execute(state, delay) : this._execute(state, delay);\n }\n\n protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n\n if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n\n // Otherwise flush the scheduler starting with this action.\n scheduler.flush(this);\n\n // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n // `TimerHandle`, and generally the return value here isn't really used. So the\n // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n // as opposed to refactoring every other instanceo of `requestAsyncId`.\n return 0;\n }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * Put every next task on a queue, instead of executing it immediately\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules given task synchronously - executes it right when\n * it is scheduled. 
However when called recursively, that is when inside the scheduled task,\n * another task is scheduled with queue scheduler, instead of executing immediately as well,\n * that task will be put on a queue and wait for current one to finish.\n *\n * This means that when you execute task with `queue` scheduler, you are sure it will end\n * before any other task scheduled with that scheduler will start.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n * queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n * console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n * if (state !== 0) {\n * console.log('before', state);\n * this.schedule(state - 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * console.log('after', state);\n * }\n * }, 0, 3);\n *\n * // In scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. 
Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction extends AsyncAction {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. 
cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n const flushId = this._scheduled;\n this._scheduled = undefined;\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? 
of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! 
: defaultValue;\n}\n", "export const isArrayLike = ((x: any): x is ArrayLike => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable(obj: any): obj is AsyncIterable {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator(readableStream: ReadableStreamLike): AsyncGenerator {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike(obj: any): obj is ReadableStreamLike {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an