From 111606ea9fd4e48d2d033ee2cfd6ff603b6fda39 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Thu, 22 Jun 2023 12:12:49 -0400
Subject: [PATCH] Daily rc sync to master (#4281)

* Support `HardwareHamiltonian` pulses in `stoch_pulse_grad` (#4215)

* single out gradient transform checks
* rename stochastic pulse gradient file
* unify gradient_analysis and grad_method_validation
* continue restructure of analysis+validation
* CV
* black
* modularize more
* more modularizing
* black
* tiny [skip ci]
* [skip ci] lint
* remove dummy test
* test fix
* add test file to linting test file
* test fixes, docstrings
* code review
* docstring gradient_analysis_and_grad_method_validation
* move first fun
* code review:move functions
* test regex
* regexs
* move and promote reorder_grads
* tmp
* more tmp
* test cases, contractions
* lint
* docstring
* even more tmp
* cleanup
* black
* tmp
* lint
* move stoch_pulse_gradient.. files back to pulse_gradient...
* move stoch_pulse_gradient.. files back to pulse_gradient...
* lint
* rename
* extend functions and tests
* lint and black
* changelog
* improve
* update example to include non-Pauli word generator
* add jit test with pauli sentence
* tmp
* debugging, docstring, extend test
* review
* optimize for Pauli words
* Apply suggestions from code review

Co-authored-by: Romain Moyard

* test cases code review
* fix parametrization
* drafting
* working prototype
* finish merge; cleanup
* changelog
* comments
* [skip ci]
* raising an error; cleanup [skip ci]
* Apply suggestions from code review

Co-authored-by: Korbinian Kottmann <43949391+Qottmann@users.noreply.github.com>

* change contraction idea
* typo in docs
* tests
* remove prints
* fix test
* test descriptions
* fix merge
* format
* code review; test coverage
* coverage reordering
* fix
* trigger CI
* trigger
* clear caches
* trigger
* trigger

---------

Co-authored-by: Romain Moyard
Co-authored-by: Korbinian Kottmann <43949391+Qottmann@users.noreply.github.com>
Co-authored-by: Korbinian Kottmann

* Fix batching of derivative tapes in autograd (#4245)

* Fix `expval` of `Sum` with broadcasting (#4275)

* fix bug and add test
* changelog addition

* Various doc fixes (#4268)

* exclude files from pr

---------

Co-authored-by: David Wierichs
Co-authored-by: Romain Moyard
Co-authored-by: Korbinian Kottmann <43949391+Qottmann@users.noreply.github.com>
Co-authored-by: Korbinian Kottmann
Co-authored-by: Christina Lee
Co-authored-by: Edward Jiang <34989448+eddddddy@users.noreply.github.com>
Co-authored-by: GitHub Actions Bot <>
Co-authored-by: Matthew Silverman
---
 doc/releases/changelog-0.31.0.md            |  1 +
 pennylane/devices/default_qubit.py          |  9 ++--
 pennylane/pauli/utils.py                    | 28 +++++++-----
 pennylane/transforms/core/transform.py      |  6 +--
 .../decompositions/single_qubit_unitary.py  |  6 +--
 tests/devices/test_default_qubit.py         | 45 +++++++++++--------
 6 files changed, 58 insertions(+), 37 deletions(-)

diff --git a/doc/releases/changelog-0.31.0.md b/doc/releases/changelog-0.31.0.md
index f00da904469..4213ac92337 100644
--- a/doc/releases/changelog-0.31.0.md
+++ b/doc/releases/changelog-0.31.0.md
@@ -447,6 +447,7 @@
 
 * Allow for `Sum` observables with trainable parameters.
   [(#4251)](https://github.com/PennyLaneAI/pennylane/pull/4251)
+  [(#4275)](https://github.com/PennyLaneAI/pennylane/pull/4275)
 
 Contributors ✍️
 
diff --git a/pennylane/devices/default_qubit.py b/pennylane/devices/default_qubit.py
index 35a0a5da1c8..ed33195fbae 100644
--- a/pennylane/devices/default_qubit.py
+++ b/pennylane/devices/default_qubit.py
@@ -570,10 +570,13 @@ def expval(self, observable, shot_range=None, bin_size=None):
                 Hamiltonian is not NumPy or Autograd
 
         """
+        is_state_batched = self._ndim(self.state) == 2
         # intercept Sums
         if isinstance(observable, Sum) and not self.shots:
             return measure(
-                ExpectationMP(observable.map_wires(self.wire_map)), self._pre_rotated_state
+                ExpectationMP(observable.map_wires(self.wire_map)),
+                self._pre_rotated_state,
+                is_state_batched,
             )
 
         # intercept other Hamiltonians
@@ -592,7 +595,7 @@ def expval(self, observable, shot_range=None, bin_size=None):
 
         if backprop_mode:
             # TODO[dwierichs]: This branch is not adapted to broadcasting yet
-            if self._ndim(self.state) == 2:
+            if is_state_batched:
                 raise NotImplementedError(
                     "Expectation values of Hamiltonians for interface!=None are "
                     "not supported together with parameter broadcasting yet"
@@ -632,7 +635,7 @@ def expval(self, observable, shot_range=None, bin_size=None):
             Hmat = observable.sparse_matrix(wire_order=self.wires)
 
             state = qml.math.toarray(self.state)
-            if self._ndim(state) == 2:
+            if is_state_batched:
                 res = qml.math.array(
                     [
                         csr_matrix.dot(
diff --git a/pennylane/pauli/utils.py b/pennylane/pauli/utils.py
index 5ba806d7912..b020737df3e 100644
--- a/pennylane/pauli/utils.py
+++ b/pennylane/pauli/utils.py
@@ -51,8 +51,7 @@ def _wire_map_from_pauli_pair(pauli_word_1, pauli_word_2):
     return {label: i for i, label in enumerate(wire_labels)}
 
 
-@singledispatch
-def is_pauli_word(observable):  # pylint:disable=unused-argument
+def is_pauli_word(observable):
     """
     Checks if an observable instance consists only of Pauli and Identity Operators.
 
@@ -93,36 +92,45 @@ def is_pauli_word(observable):  # pylint:disable=unused-argument
     >>> is_pauli_word(4 * qml.PauliX(0) @ qml.PauliZ(0))
     True
     """
+    return _is_pauli_word(observable)
+
+
+@singledispatch
+def _is_pauli_word(observable):  # pylint:disable=unused-argument
+    """
+    Private implementation of is_pauli_word, to prevent all of the
+    registered functions from appearing in the Sphinx docs.
+    """
     return False
 
 
-@is_pauli_word.register(PauliX)
-@is_pauli_word.register(PauliY)
-@is_pauli_word.register(PauliZ)
-@is_pauli_word.register(Identity)
+@_is_pauli_word.register(PauliX)
+@_is_pauli_word.register(PauliY)
+@_is_pauli_word.register(PauliZ)
+@_is_pauli_word.register(Identity)
 def _is_pw_pauli(
     observable: Union[PauliX, PauliY, PauliZ, Identity]
 ):  # pylint:disable=unused-argument
     return True
 
 
-@is_pauli_word.register
+@_is_pauli_word.register
 def _is_pw_tensor(observable: Tensor):
     pauli_word_names = ["Identity", "PauliX", "PauliY", "PauliZ"]
     return set(observable.name).issubset(pauli_word_names)
 
 
-@is_pauli_word.register
+@_is_pauli_word.register
 def _is_pw_ham(observable: Hamiltonian):
     return False if len(observable.ops) != 1 else is_pauli_word(observable.ops[0])
 
 
-@is_pauli_word.register
+@_is_pauli_word.register
 def _is_pw_prod(observable: Prod):
     return all(is_pauli_word(op) for op in observable)
 
 
-@is_pauli_word.register
+@_is_pauli_word.register
 def _is_pw_sprod(observable: SProd):
     return is_pauli_word(observable.base)
 
diff --git a/pennylane/transforms/core/transform.py b/pennylane/transforms/core/transform.py
index 85bba6262f7..bc2736da9a2 100644
--- a/pennylane/transforms/core/transform.py
+++ b/pennylane/transforms/core/transform.py
@@ -58,9 +58,9 @@ def post_processing_fn(results):
 
             return [tape1, tape2], post_processing_fn
 
-    Of course, we want to be able to apply this transform on `qfunc` and `qnodes`. That's where the `transform` function
+    Of course, we want to be able to apply this transform on ``qfunc`` and ``qnodes``. That's where the ``transform`` function
     comes into play. This function validates the signature of your quantum transform and dispatches it on the different
-    object. Let's define a circuit as a qfunc and as qnode.
+    object. Let's define a circuit as a qfunc and as a qnode.
 
     .. code-block:: python
 
@@ -85,7 +85,7 @@ def qnode_circuit(a):
 
     Now you can use the dispatched transform directly on qfunc and qnodes.
 
-    For QNodes, the dispatched transform populates the `TransformProgram` of your QNode. The transform and its
+    For QNodes, the dispatched transform populates the ``TransformProgram`` of your QNode. The transform and its
     processing function are applied in the execution.
 
     >>> transformed_qnode = dispatched_transform(qfunc_circuit)
diff --git a/pennylane/transforms/decompositions/single_qubit_unitary.py b/pennylane/transforms/decompositions/single_qubit_unitary.py
index 6ffba710d5e..fee334cc910 100644
--- a/pennylane/transforms/decompositions/single_qubit_unitary.py
+++ b/pennylane/transforms/decompositions/single_qubit_unitary.py
@@ -417,10 +417,10 @@ def _zxz_decomposition(U, wire, return_global_phase=False):
 def one_qubit_decomposition(U, wire, rotations="ZYZ", return_global_phase=False):
     r"""Decompose a one-qubit unitary :math:`U` in terms of elementary operations. (batched operation)
 
-    Any one qubit unitary operation can be implemented upto a global phase by composing RX, RY,
+    Any one qubit unitary operation can be implemented up to a global phase by composing RX, RY,
     and RZ gates.
 
-    Currently supported values for `rotations` are "ZYZ", "XYX", and "ZXZ".
+    Currently supported values for ``rotations`` are "ZYZ", "XYX", and "ZXZ".
 
     Args:
         U (tensor): A :math:`2 \times 2` unitary matrix.
@@ -431,7 +431,7 @@ def one_qubit_decomposition(U, wire, rotations="ZYZ", return_global_phase=False)
 
     Returns:
         list[Operation]: Returns a list of gates which when applied in the order of appearance in
-            the list is equivalent to the unitary :math:`U` up to a global phase. If `return_global_phase=True`,
+            the list is equivalent to the unitary :math:`U` up to a global phase. If ``return_global_phase=True``,
             the global phase is returned as the last element of the list.
 
     **Example**
diff --git a/tests/devices/test_default_qubit.py b/tests/devices/test_default_qubit.py
index 85a2ef86781..52788c14ba6 100644
--- a/tests/devices/test_default_qubit.py
+++ b/tests/devices/test_default_qubit.py
@@ -19,6 +19,7 @@
 # pylint: disable=protected-access,cell-var-from-loop
 import math
+from functools import partial
 
 import pytest
 
 import pennylane as qml
@@ -2361,14 +2362,20 @@ def test_Hamiltonian_filtered_from_rotations(self, mocker):
         assert qml.equal(call_args.measurements[0], qml.expval(qml.PauliX(0)))
 
 
+@pytest.mark.parametrize("is_state_batched", [False, True])
 class TestSumSupport:
     """Tests for custom Sum support in DefaultQubit."""
 
-    expected_grad = [-np.sin(1.3), np.cos(1.3)]
+    @staticmethod
+    def expected_grad(is_state_batched):
+        if is_state_batched:
+            return [[-np.sin(1.3), -np.sin(0.4)], [np.cos(1.3), np.cos(0.4)]]
+        return [-np.sin(1.3), np.cos(1.3)]
 
     @staticmethod
-    def circuit(y, z):
-        qml.RX(1.3, 0)
+    def circuit(y, z, is_state_batched):
+        rx_param = [1.3, 0.4] if is_state_batched else 1.3
+        qml.RX(rx_param, 0)
         return qml.expval(
             qml.sum(
                 qml.s_prod(y, qml.PauliY(0)),
@@ -2376,7 +2383,7 @@ def circuit(y, z):
             )
         )
 
-    def test_super_expval_not_called(self, mocker):
+    def test_super_expval_not_called(self, is_state_batched, mocker):
         """Tests basic expval result, and ensures QubitDevice.expval is not called."""
         dev = qml.device("default.qubit", wires=1)
         spy = mocker.spy(qml.QubitDevice, "expval")
@@ -2385,28 +2392,30 @@ def test_super_expval_not_called(self, mocker):
         spy.assert_not_called()
 
     @pytest.mark.autograd
-    def test_trainable_autograd(self):
+    def test_trainable_autograd(self, is_state_batched):
         """Tests that coeffs passed to a sum are trainable with autograd."""
+        if is_state_batched:
+            pytest.skip(msg="Broadcasting, qml.jacobian and new return types do not work together")
         dev = qml.device("default.qubit", wires=1)
         qnode = qml.QNode(self.circuit, dev, interface="autograd")
         y, z = np.array([1.1, 2.2])
-        actual = qml.grad(qnode)(y, z)
-        assert np.allclose(actual, self.expected_grad)
+        actual = qml.grad(qnode, argnum=[0, 1])(y, z, is_state_batched)
+        assert np.allclose(actual, self.expected_grad(is_state_batched))
 
     @pytest.mark.torch
-    def test_trainable_torch(self):
+    def test_trainable_torch(self, is_state_batched):
         """Tests that coeffs passed to a sum are trainable with torch."""
         import torch
 
         dev = qml.device("default.qubit", wires=1)
         qnode = qml.QNode(self.circuit, dev, interface="torch")
         y, z = torch.tensor(1.1, requires_grad=True), torch.tensor(2.2, requires_grad=True)
-        qnode(y, z).backward()
-        actual = [y.grad, z.grad]
-        assert np.allclose(actual, self.expected_grad)
+        _qnode = partial(qnode, is_state_batched=is_state_batched)
+        actual = torch.stack(torch.autograd.functional.jacobian(_qnode, (y, z)))
+        assert np.allclose(actual, self.expected_grad(is_state_batched))
 
     @pytest.mark.tf
-    def test_trainable_tf(self):
+    def test_trainable_tf(self, is_state_batched):
         """Tests that coeffs passed to a sum are trainable with tf."""
         import tensorflow as tf
 
@@ -2414,20 +2423,20 @@ def test_trainable_tf(self):
         qnode = qml.QNode(self.circuit, dev, interface="tensorflow")
         y, z = tf.Variable(1.1, dtype=tf.float64), tf.Variable(2.2, dtype=tf.float64)
         with tf.GradientTape() as tape:
-            res = qnode(y, z)
-        actual = tape.gradient(res, [y, z])
-        assert np.allclose(actual, self.expected_grad)
+            res = qnode(y, z, is_state_batched)
+        actual = tape.jacobian(res, [y, z])
+        assert np.allclose(actual, self.expected_grad(is_state_batched))
 
     @pytest.mark.jax
-    def test_trainable_jax(self):
+    def test_trainable_jax(self, is_state_batched):
         """Tests that coeffs passed to a sum are trainable with jax."""
         import jax
 
         dev = qml.device("default.qubit", wires=1)
         qnode = qml.QNode(self.circuit, dev, interface="jax")
         y, z = jax.numpy.array([1.1, 2.2])
-        actual = jax.grad(qnode, argnums=[0, 1])(y, z)
-        assert np.allclose(actual, self.expected_grad)
+        actual = jax.jacobian(qnode, argnums=[0, 1])(y, z, is_state_batched)
+        assert np.allclose(actual, self.expected_grad(is_state_batched))
 
 
 class TestGetBatchSize:
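For illustration only, the pennylane/pauli/utils.py hunk above uses a general pattern worth noting: a thin public function wrapping a private functools.singledispatch dispatcher, so that the per-type registered overloads stay out of the rendered Sphinx documentation. The following minimal, self-contained sketch shows that pattern in isolation; it is not part of the patch, and the names describe, _describe, Circle, and Square are hypothetical stand-ins rather than PennyLane code.

# Minimal sketch of the wrapper-around-singledispatch pattern used for
# is_pauli_word above. All names here are hypothetical stand-ins.
from functools import singledispatch


class Circle:
    """Toy type standing in for one operator class."""


class Square:
    """Toy type standing in for another operator class."""


def describe(shape):
    """Public entry point; only this function (and its docstring) is documented."""
    return _describe(shape)


@singledispatch
def _describe(shape):  # pylint:disable=unused-argument
    # Private dispatcher: fallback for types with no registered overload.
    return "unknown shape"


@_describe.register
def _describe_circle(shape: Circle):  # pylint:disable=unused-argument
    return "a circle"


@_describe.register
def _describe_square(shape: Square):  # pylint:disable=unused-argument
    return "a square"


# Usage: dispatch happens on the type of the first argument.
assert describe(Circle()) == "a circle"
assert describe(42) == "unknown shape"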