diff --git a/qiskit/algorithms/eigensolvers/vqd.py b/qiskit/algorithms/eigensolvers/vqd.py
index a3e4020338d0..caf1113e3b9e 100644
--- a/qiskit/algorithms/eigensolvers/vqd.py
+++ b/qiskit/algorithms/eigensolvers/vqd.py
@@ -38,6 +38,9 @@
 from ..exceptions import AlgorithmError
 from ..observables_evaluator import estimate_observables
 
+# private function as we expect this to be updated in the next release
+from ..utils.set_batching import _set_default_batchsize
+
 logger = logging.getLogger(__name__)
 
 
@@ -264,10 +267,18 @@ def compute_eigenvalues(
                     fun=energy_evaluation, x0=initial_point, bounds=bounds
                 )
             else:
+                # we always want to submit as many estimations per job as possible for minimal
+                # overhead on the hardware
+                was_updated = _set_default_batchsize(self.optimizer)
+
                 opt_result = self.optimizer.minimize(
                     fun=energy_evaluation, x0=initial_point, bounds=bounds
                 )
 
+                # reset to original value
+                if was_updated:
+                    self.optimizer.set_max_evals_grouped(None)
+
             eval_time = time() - start_time
 
             self._update_vqd_result(result, opt_result, eval_time, self.ansatz.copy())
diff --git a/qiskit/algorithms/minimum_eigensolvers/sampling_vqe.py b/qiskit/algorithms/minimum_eigensolvers/sampling_vqe.py
index cfa19eb7b6cf..711f93e60a4c 100755
--- a/qiskit/algorithms/minimum_eigensolvers/sampling_vqe.py
+++ b/qiskit/algorithms/minimum_eigensolvers/sampling_vqe.py
@@ -39,6 +39,9 @@
 from ..observables_evaluator import estimate_observables
 from ..utils import validate_initial_point, validate_bounds
 
+# private function as we expect this to be updated in the next release
+from ..utils.set_batching import _set_default_batchsize
+
 logger = logging.getLogger(__name__)
 
 
@@ -208,10 +211,18 @@ def compute_minimum_eigenvalue(
             # pylint: disable=not-callable
             optimizer_result = self.optimizer(fun=evaluate_energy, x0=initial_point, bounds=bounds)
         else:
+            # we always want to submit as many estimations per job as possible for minimal
+            # overhead on the hardware
+            was_updated = _set_default_batchsize(self.optimizer)
+
             optimizer_result = self.optimizer.minimize(
                 fun=evaluate_energy, x0=initial_point, bounds=bounds
             )
 
+            # reset to original value
+            if was_updated:
+                self.optimizer.set_max_evals_grouped(None)
+
         optimizer_time = time() - start_time
 
         logger.info(
diff --git a/qiskit/algorithms/minimum_eigensolvers/vqe.py b/qiskit/algorithms/minimum_eigensolvers/vqe.py
index 4c01ddc26191..266637253911 100644
--- a/qiskit/algorithms/minimum_eigensolvers/vqe.py
+++ b/qiskit/algorithms/minimum_eigensolvers/vqe.py
@@ -35,6 +35,9 @@
 from ..observables_evaluator import estimate_observables
 from ..utils import validate_initial_point, validate_bounds
 
+# private function as we expect this to be updated in the next release
+from ..utils.set_batching import _set_default_batchsize
+
 logger = logging.getLogger(__name__)
 
 
@@ -181,10 +184,18 @@ def compute_minimum_eigenvalue(
                 fun=evaluate_energy, x0=initial_point, jac=evaluate_gradient, bounds=bounds
             )
         else:
+            # we always want to submit as many estimations per job as possible for minimal
+            # overhead on the hardware
+            was_updated = _set_default_batchsize(self.optimizer)
+
             optimizer_result = self.optimizer.minimize(
                 fun=evaluate_energy, x0=initial_point, jac=evaluate_gradient, bounds=bounds
            )
 
+            # reset to original value
+            if was_updated:
+                self.optimizer.set_max_evals_grouped(None)
+
         optimizer_time = time() - start_time
 
         logger.info(
diff --git a/qiskit/algorithms/optimizers/optimizer.py b/qiskit/algorithms/optimizers/optimizer.py
index 6f2e4e1077f9..3ade5b08fb3a 100644
--- a/qiskit/algorithms/optimizers/optimizer.py
+++ b/qiskit/algorithms/optimizers/optimizer.py
@@ -180,7 +180,7 @@ def __init__(self):
         self._bounds_support_level = self.get_support_level()["bounds"]
         self._initial_point_support_level = self.get_support_level()["initial_point"]
         self._options = {}
-        self._max_evals_grouped = 1
+        self._max_evals_grouped = None
 
     @abstractmethod
     def get_support_level(self):
@@ -205,7 +205,7 @@ def set_options(self, **kwargs):
 
     # pylint: disable=invalid-name
     @staticmethod
-    def gradient_num_diff(x_center, f, epsilon, max_evals_grouped=1):
+    def gradient_num_diff(x_center, f, epsilon, max_evals_grouped=None):
         """
         We compute the gradient with the numeric differentiation in the parallel way,
         around the point x_center.
@@ -214,11 +214,14 @@ def gradient_num_diff(x_center, f, epsilon, max_evals_grouped=1):
             x_center (ndarray): point around which we compute the gradient
             f (func): the function of which the gradient is to be computed.
             epsilon (float): the epsilon used in the numeric differentiation.
-            max_evals_grouped (int): max evals grouped
+            max_evals_grouped (int): max evals grouped, defaults to 1 (i.e. no batching).
 
         Returns:
             grad: the gradient computed
         """
+        if max_evals_grouped is None:  # no batching by default
+            max_evals_grouped = 1
+
         forig = f(*((x_center,)))
         grad = []
         ei = np.zeros((len(x_center),), float)
diff --git a/qiskit/algorithms/optimizers/spsa.py b/qiskit/algorithms/optimizers/spsa.py
index a7f4f93efdaa..ac70f8a0a6fe 100644
--- a/qiskit/algorithms/optimizers/spsa.py
+++ b/qiskit/algorithms/optimizers/spsa.py
@@ -719,7 +719,7 @@ def _batch_evaluate(function, points, max_evals_grouped, unpack_points=False):
     """
 
     # if the function cannot handle lists of points as input, cover this case immediately
-    if max_evals_grouped == 1:
+    if max_evals_grouped is None or max_evals_grouped == 1:
         # support functions with multiple arguments where the points are given in a tuple
         return [
             function(*point) if isinstance(point, tuple) else function(point) for point in points
diff --git a/qiskit/algorithms/utils/set_batching.py b/qiskit/algorithms/utils/set_batching.py
new file mode 100644
index 000000000000..225f50a6fed8
--- /dev/null
+++ b/qiskit/algorithms/utils/set_batching.py
@@ -0,0 +1,27 @@
+# This code is part of Qiskit.
+#
+# (C) Copyright IBM 2022.
+#
+# This code is licensed under the Apache License, Version 2.0. You may
+# obtain a copy of this license in the LICENSE.txt file in the root directory
+# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
+#
+# Any modifications or derivative works of this code must retain this
+# copyright notice, and modified files need to carry a notice indicating
+# that they have been altered from the originals.
+ +"""Set default batch sizes for the optimizers.""" + +from qiskit.algorithms.optimizers import Optimizer, SPSA + + +def _set_default_batchsize(optimizer: Optimizer) -> bool: + """Set the default batchsize, if None is set and return whether it was updated or not.""" + if isinstance(optimizer, SPSA): + updated = optimizer._max_evals_grouped is None + if updated: + optimizer.set_max_evals_grouped(50) + else: # we only set a batchsize for SPSA + updated = False + + return updated diff --git a/releasenotes/notes/fix-vqe-default-batching-eb08e6ce17907da3.yaml b/releasenotes/notes/fix-vqe-default-batching-eb08e6ce17907da3.yaml new file mode 100644 index 000000000000..8ea2178ed722 --- /dev/null +++ b/releasenotes/notes/fix-vqe-default-batching-eb08e6ce17907da3.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixed a performance bug where the new primitive-based variational algorithms + :class:`.minimum_eigensolvers.VQE`, :class:`.eigensolvers.VQD` and :class:`.SamplingVQE` + did not batch energy evaluations per default, which resulted in a significant slowdown + if a hardware backend was used. \ No newline at end of file diff --git a/test/python/algorithms/minimum_eigensolvers/test_vqe.py b/test/python/algorithms/minimum_eigensolvers/test_vqe.py index c3c0b89d9cce..31a55d27d438 100644 --- a/test/python/algorithms/minimum_eigensolvers/test_vqe.py +++ b/test/python/algorithms/minimum_eigensolvers/test_vqe.py @@ -300,6 +300,35 @@ def run_check(): vqe.optimizer = L_BFGS_B() run_check() + def test_default_batch_evaluation_on_spsa(self): + """Test the default batching works.""" + ansatz = TwoLocal(2, rotation_blocks=["ry", "rz"], entanglement_blocks="cz") + + wrapped_estimator = Estimator() + inner_estimator = Estimator() + + callcount = {"estimator": 0} + + def wrapped_estimator_run(*args, **kwargs): + kwargs["callcount"]["estimator"] += 1 + return inner_estimator.run(*args, **kwargs) + + wrapped_estimator.run = partial(wrapped_estimator_run, callcount=callcount) + + spsa = SPSA(maxiter=5) + + vqe = VQE(wrapped_estimator, ansatz, spsa) + _ = vqe.compute_minimum_eigenvalue(Pauli("ZZ")) + + # 1 calibration + 5 loss + 1 return loss + expected_estimator_runs = 1 + 5 + 1 + + with self.subTest(msg="check callcount"): + self.assertEqual(callcount["estimator"], expected_estimator_runs) + + with self.subTest(msg="check reset to original max evals grouped"): + self.assertIsNone(spsa._max_evals_grouped) + def test_batch_evaluate_with_qnspsa(self): """Test batch evaluating with QNSPSA works.""" ansatz = TwoLocal(2, rotation_blocks=["ry", "rz"], entanglement_blocks="cz")