
Implementation of pennylane optimizers #101

Merged
Commits (42)
1c208eb
implement some optimization methods from pennylane
raulconchello Oct 5, 2022
90e68eb
adding the spsa pennylane optimizer
raulconchello Oct 6, 2022
9d5439a
Documentation for pennylane optimizers
raulconchello Oct 12, 2022
a331046
Documentation for pennylane optimizers
raulconchello Oct 12, 2022
3054af6
Added pennylane dependency in setup.py
raulconchello Oct 12, 2022
985674a
Added a test for pennylane optimizers
raulconchello Oct 12, 2022
7e2a022
Documentation for pennylane optimizers test
raulconchello Oct 12, 2022
27b0ef7
The proposed changes have been modified
raulconchello Oct 13, 2022
ed96166
codecov added
raulconchello Oct 13, 2022
ed9bdb6
Merge branch 'dev' into dev_PennyLane_optimization_methods
raulconchello Oct 19, 2022
9608b5e
Merge branch 'dev' into dev_PennyLane_optimization_methods
raulconchello Oct 20, 2022
115b576
Documentation updated
raulconchello Oct 21, 2022
828a787
Solving bug in optimizers pennylane tests
raulconchello Oct 28, 2022
1394b96
Documentation
raulconchello Oct 28, 2022
f870c2f
Add custom optimizers in ALLOWED_MINIMIZATION_METHODS
raulconchello Oct 28, 2022
a5ebaff
Change in step computation depending on the method
raulconchello Oct 31, 2022
10e999b
Merge branch 'dev' into dev_PennyLane_optimization_methods
raulconchello Nov 1, 2022
eb3a47e
Making training_vqa (for pennylane) more readable
Nov 8, 2022
1e714c7
Better tests for the pennylane optimizers
Nov 8, 2022
3724e2f
Requirements
Nov 8, 2022
a1f6118
Merge branch 'dev' into dev_PennyLane_optimization_methods
Nov 8, 2022
5e04b2a
Adding CustomScipyPennyLaneOptimizer
Nov 14, 2022
6d55818
Debugging
Nov 15, 2022
1ce7470
PennyLaneOptimizer
Nov 15, 2022
2e3020e
Merge branch 'dev' into dev_PennyLane_optimization_methods
Nov 15, 2022
3a6d320
Creating PennyLane folder
Nov 15, 2022
5557073
Removing the PennyLane requirement
Nov 16, 2022
4473e27
Deleting some prints
Nov 16, 2022
d66524d
Added NOTICE, it specifies code is from PennyLane
Nov 16, 2022
24a64dd
Requirements.txt updated
Nov 16, 2022
3b196fb
Docs optimizers
Nov 16, 2022
c3a7fb7
Debugging
Nov 16, 2022
88a6ac5
Debugging docs
Nov 16, 2022
97f7aae
Debugging
Nov 16, 2022
ce791ee
Merge branch 'dev' into dev_PennyLane_optimization_methods
Nov 16, 2022
98dde82
except Exception as e. In PennyLane opt
Nov 16, 2022
d164b9c
Requirements with >=
Nov 18, 2022
2aebedb
Documentation and copyright
Nov 18, 2022
7f8c55d
Cleaning imports of optimization_methods_pennylane
Nov 18, 2022
d478b53
Documentation
Nov 18, 2022
4fa48d3
Requirements -> autoray>=0.3.1
Nov 18, 2022
9b76633
Requirements -> autoray>=0.3.1
Nov 18, 2022
1 change: 1 addition & 0 deletions docs/requirements.txt
@@ -7,3 +7,4 @@ sphinx>=4.5.0
sphinx-autodoc-typehints==1.18.1
sphinx-rtd-theme==1.0.0
ipython==8.2.0
pennylane>=0.26.0
6 changes: 6 additions & 0 deletions docs/source/optimizers.rst
@@ -29,6 +29,12 @@ Optimization Methods
   :show-inheritance:
   :inherited-members:

.. automodule:: openqaoa.optimizers.optimization_methods_pennylane
   :members:
   :undoc-members:
   :show-inheritance:
   :inherited-members:

Derivative functions
--------------------
.. automodule:: openqaoa.derivative_functions
160 changes: 160 additions & 0 deletions openqaoa/optimizers/optimization_methods_pennylane.py
@@ -0,0 +1,160 @@
# Copyright 2022 Entropica Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Function to implement pennylane optimization algorithms.
raulconchello marked this conversation as resolved.
Show resolved Hide resolved
Read https://docs.pennylane.ai/en/stable/introduction/interfaces.html#optimizers
Only those that don't require a pennylane backend have been implemented.
raulconchello marked this conversation as resolved.
Show resolved Hide resolved
Similarly as with the custom optimization methods Scipy `minimize` is used. Extends available scipy methods.
"""

raulconchello marked this conversation as resolved.
Show resolved Hide resolved
import pennylane as pl
import inspect
from scipy.optimize import OptimizeResult
import numpy as np

AVAILABLE_OPTIMIZERS = {  # optimizers implemented
    'adagrad': pl.AdagradOptimizer,
    'adam': pl.AdamOptimizer,
    'vgd': pl.GradientDescentOptimizer,
    'momentum': pl.MomentumOptimizer,
    'nesterov_momentum': pl.NesterovMomentumOptimizer,
    'natural_grad_descent': pl.QNGOptimizer,
    'rmsprop': pl.RMSPropOptimizer,
    'rotosolve': pl.RotosolveOptimizer,
    'spsa': pl.SPSAOptimizer,
}



def pennylane_optimizer(fun, x0, args=(), maxfev=None, method='vgd', qfim=None,
                        maxiter=100, tol=10**(-6), jac=None, callback=None,
                        nums_frequency=None, spectra=None, shifts=None, **options):

    '''
    Minimize a function `fun` using some PennyLane method.
    To check the available methods, look at the AVAILABLE_OPTIMIZERS dictionary.
    Read https://docs.pennylane.ai/en/stable/introduction/interfaces.html#optimizers

    Parameters
    ----------
    fun : callable
        Function to minimize.
    x0 : ndarray
        Initial guess.
    args : sequence, optional
        Arguments to pass to `fun`.
    maxfev : int, optional
        Maximum number of function evaluations.
    method : string, optional
        Optimizer method used to compute the steps.
    qfim : callable, optional (required for natural_grad_descent)
        Callable returning the Fubini-Study metric tensor.
    maxiter : int, optional
        Maximum number of iterations.
    tol : float, optional
        Convergence tolerance: optimization terminates when the improvement
        between two consecutive steps is smaller than `tol`.
    jac : callable, optional (required for all methods but rotosolve and spsa)
        Callable gradient function.
    callback : callable, optional
        Called after each iteration, as ``callback(xk)``, where ``xk`` is the
        current parameter vector.
    options : dict, optional
        Dictionary whose keys are arguments of the optimizer object and whose
        values are the values to pass to those arguments.
        For all the possible arguments, read
        https://docs.pennylane.ai/en/stable/introduction/interfaces.html#optimizers.

    (For the three Rotosolve-specific parameters below, read
    https://docs.pennylane.ai/en/stable/code/api/pennylane.RotosolveOptimizer.html#pennylane.RotosolveOptimizer.step)
    nums_frequency : dict[dict], required for rotosolve
        The number of frequencies in `fun` per parameter.
    spectra : dict[dict], required for rotosolve
        Frequency spectra in `fun` per parameter.
    shifts : dict[dict], required for rotosolve
        Shift angles for the reconstruction per parameter.

    Returns
    -------
    OptimizeResult : OptimizeResult
        Scipy OptimizeResult object.
    '''

    def cost(params, *cost_args):  # convert the PennyLane parameter array to a numpy array before calling fun
        return fun(np.array(params), *cost_args)

    optimizer = AVAILABLE_OPTIMIZERS[method]  # select the optimizer class

    # get the arguments accepted by the optimizer's constructor
    arguments = inspect.signature(optimizer).parameters.keys()
    options_keys = list(options.keys())

    # keep only the options that can be passed to the optimizer (pop the others)
    for key in options_keys:
        if key not in arguments:
            options.pop(key)
    if 'maxiter' in arguments:
        options['maxiter'] = maxiter

    optimizer = optimizer(**options)  # instantiate the optimizer

    bestx = pl.numpy.array(x0, requires_grad=True)
    besty = cost(x0, *args)
    funcalls = 1  # tracks the number of function evaluations
    niter = 0
    improved = True
    stop = False

    testx = np.copy(bestx)
    testy = np.real(besty)
    while improved and not stop and niter < maxiter:
        improved = False

        # compute one step, depending on the method
        if qfim:  # natural_grad_descent
            testx, testy = optimizer.step_and_cost(cost, bestx, *args, grad_fn=jac, metric_tensor_fn=qfim)
        elif jac:  # adagrad, adam, vgd, momentum, nesterov_momentum, rmsprop
            testx, testy = optimizer.step_and_cost(cost, bestx, *args, grad_fn=jac)
        elif method == 'rotosolve':
            testx, testy = optimizer.step_and_cost(
                cost, bestx, *args,
                nums_frequency={'params': {(i,): 1 for i in range(bestx.size)}} if not nums_frequency else nums_frequency,
                spectra=spectra,
                shifts=shifts,
                full_output=False,
            )
        else:  # spsa
            testx, testy = optimizer.step_and_cost(cost, bestx, *args)
        funcalls += 1  # count one evaluation per step (a step may call fun more than once)

        # check whether the step improved the cost by more than the tolerance
        if np.abs(besty - testy) < tol and niter > 1:
            improved = False
        else:
            besty = testy
            bestx = testx
            improved = True

        if callback is not None:
            callback(bestx)
        if maxfev is not None and funcalls >= maxfev:
            stop = True
            break

        niter += 1

    return OptimizeResult(fun=besty, x=np.array(bestx), nit=niter,
                          nfev=funcalls, success=(niter > 1))
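
For reference, a minimal sketch of calling this function directly on a toy cost. The quadratic cost, its gradient, and the settings below are illustrative assumptions, not part of the PR; only `pennylane_optimizer` and its arguments are as defined above.

import numpy as np
from openqaoa.optimizers.optimization_methods_pennylane import pennylane_optimizer

# toy convex cost with minimum at x = (1, 1); gradient supplied by hand
def cost(x):
    return float(np.sum((x - 1.0) ** 2))

def grad(x):
    return 2.0 * (x - 1.0)

res = pennylane_optimizer(
    cost,
    x0=np.zeros(2),
    method='adam',  # any key of AVAILABLE_OPTIMIZERS
    jac=grad,       # required for the gradient-based methods
    maxiter=200,
    tol=1e-8,
    stepsize=0.1,   # forwarded to pl.AdamOptimizer through **options
)
print(res.x, res.fun, res.nit)  # scipy-style OptimizeResult fields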


18 changes: 17 additions & 1 deletion openqaoa/optimizers/training_vqa.py
@@ -26,6 +26,7 @@
from ..basebackend import VQABaseBackend
from ..qaoa_parameters.baseparams import QAOAVariationalBaseParams
from . import optimization_methods as om
from . import optimization_methods_pennylane as ompl

from .logger_vqa import Logger
from .result import Result
@@ -524,10 +525,15 @@ class CustomScipyGradientOptimizer(OptimizeVQA):
    * optimizer_options

        * Dictionary of optimiser-specific arguments, defaults to ``None``
        * Also used for the arguments of the PennyLane optimizers (and their step functions)

    """
    CUSTOM_GRADIENT_OPTIMIZERS = ['vgd', 'newton',
                                  'rmsprop', 'natural_grad_descent', 'spsa',
                                  'pennylane_adagrad', 'pennylane_adam', 'pennylane_vgd',
                                  'pennylane_momentum', 'pennylane_nesterov_momentum',
                                  'pennylane_natural_grad_descent', 'pennylane_rmsprop',
                                  'pennylane_rotosolve', 'pennylane_spsa']

    def __init__(self,
                 vqa_object: Type[VQABaseBackend],
@@ -617,6 +623,7 @@ def optimize(self):
        :
            The optimized return object from the ``scipy.optimize`` package; the result is assigned to the attribute ``opt_result``.
        '''

        if self.method == 'vgd':
            method = om.grad_descent
        elif self.method == 'newton':
@@ -630,6 +637,15 @@
        elif self.method == 'spsa':
            print("Warning: SPSA is an experimental feature.")
            method = om.SPSA
        elif self.method.lower().split('_')[0] == 'pennylane':  # check if we are using a PennyLane optimizer
            method = ompl.pennylane_optimizer

            self.options['method'] = self.method.lower().replace("pennylane_", "")

            if self.options['method'] == 'natural_grad_descent':
                self.options['qfim'] = qfim(self.vqa_object, self.variational_params, self.log)
            if self.options['method'] in ['spsa', 'rotosolve']:
                self.jac = None

        try:
            if self.hess == None:
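Since the selected callable is ultimately handed to `scipy.optimize.minimize`, the dispatch above relies on SciPy's custom-method hook: `minimize` accepts a callable as `method` and forwards `jac`, `callback`, and the `options` dict to it. A minimal sketch of that mechanism with a toy cost (illustrative; the cost and settings are assumptions, not from the PR):

import numpy as np
from scipy.optimize import minimize
from openqaoa.optimizers import optimization_methods_pennylane as ompl

res = minimize(
    lambda x: float(np.sum(x ** 2)),  # toy cost with minimum at the origin
    x0=np.array([0.5, -0.3]),
    method=ompl.pennylane_optimizer,  # SciPy's custom-method hook
    jac=lambda x: 2.0 * x,
    options={'method': 'vgd', 'maxiter': 50, 'tol': 1e-8},
)
print(res.x, res.fun)

Keys in `options` that the chosen PennyLane optimizer's constructor does not accept (as well as the extra `hess`/`bounds`/`constraints` kwargs SciPy passes to custom methods) are absorbed by `**options` and filtered out against the constructor's signature.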
3 changes: 2 additions & 1 deletion setup.py
@@ -19,7 +19,8 @@
    "matplotlib>=3.4.3, <3.5.0",
    "qiskit>=0.36.1",
    "pyquil>=3.1.0",
    "docplex>=2.23.1",
    "pennylane>=0.26.0"
]

requirements_docs = [
50 changes: 50 additions & 0 deletions tests/test_optimizers_pennylane.py
@@ -0,0 +1,50 @@
import warnings
import unittest

import networkx as nx
from openqaoa.workflows.optimizer import QAOA
from openqaoa.devices import create_device
from openqaoa.problems.problem import MaximumCut
from openqaoa.optimizers.training_vqa import CustomScipyGradientOptimizer


# create a problem
nodes = 4
edge_probability = 0.6
g = nx.generators.fast_gnp_random_graph(n=nodes, p=edge_probability)
maxcut_prob = MaximumCut(g)
maxcut_qubo = maxcut_prob.get_qubo_problem()


class TestPennylaneOptimizers(unittest.TestCase):

    def _run_method(self, method):
        " function to run the test for any method "
        q = QAOA()
        device = create_device(location='local', name='qiskit.statevector_simulator')
        q.set_device(device)

        q.set_circuit_properties(p=2, param_type='standard', init_type='rand', mixer_hamiltonian='x')
        q.set_backend_properties(prepend_state=None, append_state=None)
        q.set_classical_optimizer(method=method, maxiter=4, optimizer_options={'blocking': False, 'resamplings': 0},
                                  optimization_progress=True, cost_progress=True, parameter_log=True, jac='finite_difference')

        q.compile(maxcut_qubo)
        q.optimize()

    def test_pennylane_optimizers(self):
        " function to run the tests for pennylane optimizers "
        list_optimizers = CustomScipyGradientOptimizer.CUSTOM_GRADIENT_OPTIMIZERS

        for opt in list_optimizers:
            if opt.split("_")[0] == "pennylane":  # run only the pennylane_* methods
                self._run_method(opt)




if __name__ == "__main__":
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=PendingDeprecationWarning)
unittest.main()
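
To run just this module (standard unittest invocation; nothing PR-specific beyond the file path added above):

python -m unittest tests.test_optimizers_pennylane -v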