From e568b1faa5283583a536e3f0632f9e39e6284d80 Mon Sep 17 00:00:00 2001 From: Kristina Kirova Date: Tue, 27 Dec 2022 08:00:53 +0000 Subject: [PATCH 01/11] checking if the new problem is an empty dictionary and if it is, break the rqaoa cycle and solve for a the smallest non-vanishing instance. --- openqaoa/rqaoa/rqaoa.py | 14 ++++++++++---- openqaoa/workflows/optimizer.py | 33 ++++++++++++++++++--------------- 2 files changed, 28 insertions(+), 19 deletions(-) diff --git a/openqaoa/rqaoa/rqaoa.py b/openqaoa/rqaoa/rqaoa.py index 3dc104d38..f76e16fb7 100644 --- a/openqaoa/rqaoa/rqaoa.py +++ b/openqaoa/rqaoa/rqaoa.py @@ -388,7 +388,7 @@ def redefine_problem(problem: QUBO, spin_map: dict): spin_map: `dict` Updated spin_map with sponatenous eliminations from cancellations during spin removal process. """ - + # Define new QUBO problem as a dictionary new_problem_dict = {} @@ -498,9 +498,15 @@ def redefine_problem(problem: QUBO, spin_map: dict): # Delete isolated node from new problem new_problem_dict.pop((node,)) - # Redefine new QUBO problem from the dictionary - new_problem = problem_from_dict(new_problem_dict) - + # For some unweighted graphs specific eliminations can lead to eliminating the whole instance before reaching cutoff. + if new_problem_dict == {}: + new_problem = problem # set the problem to the old problem and solve classically for the smallest non-vanishing instance. + + else: + # Redefine new QUBO problem from the dictionary + new_problem = problem_from_dict(new_problem_dict) + + return new_problem, spin_map diff --git a/openqaoa/workflows/optimizer.py b/openqaoa/workflows/optimizer.py index b5e85d19a..975a76158 100644 --- a/openqaoa/workflows/optimizer.py +++ b/openqaoa/workflows/optimizer.py @@ -713,26 +713,29 @@ def optimize(self, verbose=False): spin_map = rqaoa.spin_mapping(problem, max_terms_and_stats) # Eliminate spins and redefine problem new_problem, spin_map = rqaoa.redefine_problem(problem, spin_map) + if new_problem == problem: + print("Eliminations lead to a total reduction of the problem.\n Increasing the cutoff to solve the smallest non-vanishing problem.") + break + else: + # Extract final set of eliminations with correct dependencies and update tracker + eliminations = {(spin_map[spin][1],spin):spin_map[spin][0] for spin in sorted(spin_map.keys()) if spin != spin_map[spin][1]} + elimination_tracker.append(eliminations) - # Extract final set of eliminations with correct dependencies and update tracker - eliminations = {(spin_map[spin][1],spin):spin_map[spin][0] for spin in sorted(spin_map.keys()) if spin != spin_map[spin][1]} - elimination_tracker.append(eliminations) + # Extract new number of qubits + n_qubits = new_problem.n - # Extract new number of qubits - n_qubits = new_problem.n + # Save qaoa object and new problem + qaoa_steps.append(copy.deepcopy(q)) + problem_steps.append(copy.deepcopy(new_problem)) - # Save qaoa object and new problem - qaoa_steps.append(copy.deepcopy(q)) - problem_steps.append(copy.deepcopy(new_problem)) + # problem is updated + problem = new_problem - # problem is updated - problem = new_problem - - # Compile qaoa with the problem - q.compile(problem, verbose=False) + # Compile qaoa with the problem + q.compile(problem, verbose=False) - # Add one step to the counter - counter += 1 + # Add one step to the counter + counter += 1 # Solve the new problem classically cl_energy, cl_ground_states = ground_state_hamiltonian(problem.hamiltonian) From 851352621182aa73ef998f722f5da9fa7551b486 Mon Sep 17 00:00:00 2001 From: Kristina Kirova Date: 
Wed, 28 Dec 2022 05:51:03 +0000 Subject: [PATCH 02/11] removing the else for readability since the if is a rare case --- openqaoa/workflows/optimizer.py | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/openqaoa/workflows/optimizer.py b/openqaoa/workflows/optimizer.py index 975a76158..25558bbb6 100644 --- a/openqaoa/workflows/optimizer.py +++ b/openqaoa/workflows/optimizer.py @@ -713,29 +713,30 @@ def optimize(self, verbose=False): spin_map = rqaoa.spin_mapping(problem, max_terms_and_stats) # Eliminate spins and redefine problem new_problem, spin_map = rqaoa.redefine_problem(problem, spin_map) + # In case eliminations cancel out the whole graph, break the loop (solve classically) before reaching the predefined cutoff. if new_problem == problem: print("Eliminations lead to a total reduction of the problem.\n Increasing the cutoff to solve the smallest non-vanishing problem.") break - else: - # Extract final set of eliminations with correct dependencies and update tracker - eliminations = {(spin_map[spin][1],spin):spin_map[spin][0] for spin in sorted(spin_map.keys()) if spin != spin_map[spin][1]} - elimination_tracker.append(eliminations) + + # Extract final set of eliminations with correct dependencies and update tracker + eliminations = {(spin_map[spin][1],spin):spin_map[spin][0] for spin in sorted(spin_map.keys()) if spin != spin_map[spin][1]} + elimination_tracker.append(eliminations) - # Extract new number of qubits - n_qubits = new_problem.n + # Extract new number of qubits + n_qubits = new_problem.n - # Save qaoa object and new problem - qaoa_steps.append(copy.deepcopy(q)) - problem_steps.append(copy.deepcopy(new_problem)) + # Save qaoa object and new problem + qaoa_steps.append(copy.deepcopy(q)) + problem_steps.append(copy.deepcopy(new_problem)) - # problem is updated - problem = new_problem + # problem is updated + problem = new_problem - # Compile qaoa with the problem - q.compile(problem, verbose=False) + # Compile qaoa with the problem + q.compile(problem, verbose=False) - # Add one step to the counter - counter += 1 + # Add one step to the counter + counter += 1 # Solve the new problem classically cl_energy, cl_ground_states = ground_state_hamiltonian(problem.hamiltonian) From 7138c180ca2a8c2ebda605cc0779e54b72ba4fe6 Mon Sep 17 00:00:00 2001 From: Kristina Kirova Date: Wed, 11 Jan 2023 05:47:53 +0000 Subject: [PATCH 03/11] setting to an arbitrary state instead of solving classically if the whole problem fails --- openqaoa/workflows/optimizer.py | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/openqaoa/workflows/optimizer.py b/openqaoa/workflows/optimizer.py index 126adbec6..8785fc47b 100644 --- a/openqaoa/workflows/optimizer.py +++ b/openqaoa/workflows/optimizer.py @@ -22,7 +22,7 @@ from openqaoa.workflows.parameters.qaoa_parameters import CircuitProperties, BackendProperties, ClassicalOptimizer from openqaoa.workflows.parameters.rqaoa_parameters import RqaoaParameters, ALLOWED_RQAOA_TYPES from openqaoa.qaoa_parameters import Hamiltonian, QAOACircuitParams, create_qaoa_variational_params -from openqaoa.utilities import get_mixer_hamiltonian, ground_state_hamiltonian, exp_val_hamiltonian_termwise +from openqaoa.utilities import get_mixer_hamiltonian, ground_state_hamiltonian, exp_val_hamiltonian_termwise, bitstring_energy from openqaoa.backends.qaoa_backend import get_qaoa_backend, DEVICE_NAME_TO_OBJECT_MAPPER, DEVICE_ACCESS_OBJECT_MAPPER from openqaoa.optimizers.qaoa_optimizer import 
get_optimizer from openqaoa.basebackend import QAOABaseBackendStatevector @@ -699,6 +699,7 @@ def optimize(self, verbose=False): else: f_max_terms = rqaoa.max_terms + total_elimination = False # flag, set to true if the problem vanishes due to elimination before reachign cutoff # If above cutoff, loop quantumly, else classically while n_qubits > n_cutoff: @@ -713,9 +714,9 @@ def optimize(self, verbose=False): spin_map = rqaoa.spin_mapping(problem, max_terms_and_stats) # Eliminate spins and redefine problem new_problem, spin_map = rqaoa.redefine_problem(problem, spin_map) - # In case eliminations cancel out the whole graph, break the loop (solve classically) before reaching the predefined cutoff. + # In case eliminations cancel out the whole graph, break the loop before reaching the predefined cutoff. if new_problem == problem: - print("Eliminations lead to a total reduction of the problem.\n Increasing the cutoff to solve the smallest non-vanishing problem.") + total_elimination = True break # Extract final set of eliminations with correct dependencies and update tracker @@ -737,9 +738,23 @@ def optimize(self, verbose=False): # Add one step to the counter counter += 1 - - # Solve the new problem classically - cl_energy, cl_ground_states = ground_state_hamiltonian(problem.hamiltonian) + + # In case eliminations cancel out the whole graph, spin values do not matter + #if total_elimination: + if False: + # Set the values of the spins arbitrarily + cl_ground_states = "" + for spin in np.arange(0, len(spin_map.keys())): + #spin_value = np.random.choice([0,1]) # set at random + #cl_ground_states += str(spin_value) + cl_ground_states += str(0) # set everything to 0 + cl_ground_states[0] = 1 # set the first one to 1 to respect anticorrelations + cl_ground_states = [cl_ground_states] + cl_energy = bitstring_energy(problem.hamiltonian, cl_ground_states[0]) + + else: + # Solve the new problem classically + cl_energy, cl_ground_states = ground_state_hamiltonian(problem.hamiltonian) # Retrieve full solutions including eliminated spins and their energies full_solutions = rqaoa.final_solution( From fcc228547184e91f53a505bc9b0b9d5a546d1779 Mon Sep 17 00:00:00 2001 From: Kristina Kirova Date: Wed, 11 Jan 2023 06:13:21 +0000 Subject: [PATCH 04/11] solving merge conflicts --- openqaoa/workflows/optimizer.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/openqaoa/workflows/optimizer.py b/openqaoa/workflows/optimizer.py index 02195a0c9..2cf725f99 100644 --- a/openqaoa/workflows/optimizer.py +++ b/openqaoa/workflows/optimizer.py @@ -959,15 +959,14 @@ def optimize(self, verbose=False): if self.rqaoa_parameters.rqaoa_type == "adaptive": f_max_terms = rqaoa.ada_max_terms else: - f_max_terms = rqaoa.max_terms + f_max_terms = rqaoa.max_terms -<<<<<<< HEAD - total_elimination = False # flag, set to true if the problem vanishes due to elimination before reachign cutoff -======= # timestamp for the start of the optimization self.header['execution_time_start'] = int(time.time()) - ->>>>>>> dev + + # flag, set to true if the problem vanishes due to elimination before reaching cutoff + total_elimination = False + # If above cutoff, loop quantumly, else classically while n_qubits > n_cutoff: From 3b6744138a7b9b47a3e6fc02e25b2c9b000f78fd Mon Sep 17 00:00:00 2001 From: Kristina Kirova Date: Wed, 11 Jan 2023 07:53:41 +0000 Subject: [PATCH 05/11] generating all permutations with one different spin but is nto the correct solution --- openqaoa/workflows/optimizer.py | 26 ++++++++++++++++++-------- 
1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/openqaoa/workflows/optimizer.py b/openqaoa/workflows/optimizer.py index 2cf725f99..97b592406 100644 --- a/openqaoa/workflows/optimizer.py +++ b/openqaoa/workflows/optimizer.py @@ -1011,17 +1011,27 @@ def optimize(self, verbose=False): counter += 1 # In case eliminations cancel out the whole graph, spin values do not matter - #if total_elimination: - if False: + if total_elimination: # Set the values of the spins arbitrarily - cl_ground_states = "" - for spin in np.arange(0, len(spin_map.keys())): + #cl_ground_states = "" + #for spin in np.arange(0, len(spin_map.keys())): #spin_value = np.random.choice([0,1]) # set at random #cl_ground_states += str(spin_value) - cl_ground_states += str(0) # set everything to 0 - cl_ground_states[0] = 1 # set the first one to 1 to respect anticorrelations - cl_ground_states = [cl_ground_states] - cl_energy = bitstring_energy(problem.hamiltonian, cl_ground_states[0]) + #cl_ground_states += str(0) # set everything to 0 + #cl_ground_states[0] = 1 # set the first one to 1 to respect anticorrelations + #cl_ground_states = [cl_ground_states] + + #generate all permutations, high degeneracy + single_cl_ground_state = ["0" for _ in np.arange(0, len(spin_map.keys()))] + single_cl_ground_state[0] = "1" + single_cl_ground_state = ''.join(single_cl_ground_state) + single_cl_ground_state = str(single_cl_ground_state) + + from itertools import permutations + cl_ground_states = [''.join(p) for p in permutations("1000")] + cl_ground_states = set(cl_ground_states) + + cl_energy = bitstring_energy(problem.hamiltonian, single_cl_ground_state) else: # Solve the new problem classically From 13a2e12dceb1a64d2e2aee34486eb1ebe9933ca4 Mon Sep 17 00:00:00 2001 From: Kristina Kirova Date: Wed, 11 Jan 2023 08:39:47 +0000 Subject: [PATCH 06/11] writing a test and reverting to findign the classical solution of the larger problem. --- openqaoa/workflows/optimizer.py | 3 +- tests/test_rqaoa.py | 49 +++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/openqaoa/workflows/optimizer.py b/openqaoa/workflows/optimizer.py index 97b592406..254cc1091 100644 --- a/openqaoa/workflows/optimizer.py +++ b/openqaoa/workflows/optimizer.py @@ -1011,7 +1011,8 @@ def optimize(self, verbose=False): counter += 1 # In case eliminations cancel out the whole graph, spin values do not matter - if total_elimination: + if False: + #if total_elimination: # Set the values of the spins arbitrarily #cl_ground_states = "" #for spin in np.arange(0, len(spin_map.keys())): diff --git a/tests/test_rqaoa.py b/tests/test_rqaoa.py index bb49718b4..10fb4c426 100644 --- a/tests/test_rqaoa.py +++ b/tests/test_rqaoa.py @@ -14,9 +14,13 @@ import numpy as np import unittest +import networkx as nx from openqaoa.qaoa_parameters import Hamiltonian from openqaoa.rqaoa import * +from openqaoa.problems.problem import MaximumCut +from openqaoa.workflows.optimizer import RQAOA +from openqaoa.devices import create_device """ Unittest based testing of current implementation of the RQAOA Algorithm @@ -288,6 +292,51 @@ def test_redefine_problem(self): # Test computed Hamiltonian contains the correct terms assert np.allclose(hamiltonian.constant,comp_hamiltonian.constant), f'Constant in the computed Hamiltonian is incorrect' + def test_total_elimination_whole_workflow(self): + """ + Testing an edge case: solving MaxCut on a specific random unweighted graph leads to vanishing instances before reaching cutoff size. 
+ The test recreates the graph instance and MaxCut QUBO, runs standard RQAOA and compare the result to the expected one if the classical solution was obtained for the smallest cutoff for which the problem still exists. + """ + # Generate the graph + g = nx.generators.gnp_random_graph(n=12, p=0.7, seed=58, directed=False) + + # Define the problem and translate it into a binary QUBO. + maxcut_qubo = MaximumCut(g).get_qubo_problem() + + # Define the RQAOA object + R = RQAOA() + + # Set parameters for RQAOA: standard with cut off size 3 qubits + R.set_rqaoa_parameters(steps=1, n_cutoff=3) + + # Set more parameters with a very specific starting point + R.set_circuit_properties(p=1, init_type='custom', variational_params_dict={"betas":[0.2732211141792405], "gammas":[1.6017587697695814]}, mixer_hamiltonian='x') + + # Define the device to be vectorized + device = create_device(location='local', name='vectorized') + R.set_device(device) + + # Set the classical method used to optimiza over QAOA angles and its properties + R.set_classical_optimizer(method="cobyla", maxiter=200) + + # Compile and optimize the problem instance on RQAOA + R.compile(maxcut_qubo) + R.optimize() + + # Get results + opt_results = R.results + + # Compare results to known behaviour: + assert opt_results['solution'] == {'101010100010': -11.0, + '010100010101': -11.0, + '101100010101': -11.0, + '101011100010': -11.0, + '010101010101': -11.0, + '101010101010': -11.0, + '010100011101': -11.0, + '010011101010': -11.0, + '101011101010': -11.0, + '010101011101': -11.0} if __name__ == "__main__": unittest.main() From 32f67ea05b5e2d88445e5b4a3e700eaaca857782 Mon Sep 17 00:00:00 2001 From: Kristina Kirova Date: Thu, 12 Jan 2023 05:25:23 +0000 Subject: [PATCH 07/11] changing the code such that we don't solve classically for the larger problem but instead produce all bitstrings where spins are fixed arbitratily if None in the spin map or according to the correlations --- openqaoa/rqaoa/rqaoa.py | 46 +++++++++++++++++++++++++++++++-- openqaoa/workflows/optimizer.py | 27 +++---------------- tests/test_rqaoa.py | 11 ++++---- 3 files changed, 53 insertions(+), 31 deletions(-) diff --git a/openqaoa/rqaoa/rqaoa.py b/openqaoa/rqaoa/rqaoa.py index 24876650e..411e05fd1 100644 --- a/openqaoa/rqaoa/rqaoa.py +++ b/openqaoa/rqaoa/rqaoa.py @@ -500,12 +500,11 @@ def redefine_problem(problem: QUBO, spin_map: dict): # For some unweighted graphs specific eliminations can lead to eliminating the whole instance before reaching cutoff. if new_problem_dict == {}: - new_problem = problem # set the problem to the old problem and solve classically for the smallest non-vanishing instance. + new_problem = problem # set the problem to the old problem and solve classically for the smallest non-vanishing instance. 
else: # Redefine new QUBO problem from the dictionary new_problem = problem_from_dict(new_problem_dict) - return new_problem, spin_map @@ -580,3 +579,46 @@ def final_solution(elimination_tracker: list, cl_states: list, hamiltonian: Hami full_solution.update({"".join(str(i) for i in state):bitstring_energy(hamiltonian, state)}) return full_solution + + +def solution_for_vanishing_instances(hamiltonian: Hamiltonian, spin_map: dict): + cl_ground_states = [""] + + for spin in spin_map.keys(): + new_cl_ground_states = [] + + if spin_map[spin][1] == None: + # add 0 or 1 arbitrarily + + for ground_state in cl_ground_states: + first_new_ground_state = ground_state + "0" + second_new_ground_state = ground_state + "1" + + new_cl_ground_states.append(first_new_ground_state) + new_cl_ground_states.append(second_new_ground_state) + + cl_ground_states = new_cl_ground_states + + else: + # fix according to correlation factor + factor = spin_map[spin][0] + parent = spin_map[spin][1] + + for ground_state in cl_ground_states: + if factor == 1.0: + # correlated + new_value_spin = ground_state[parent] + else: + # anticorrelated + new_value_spin = str(int(not bool(int(ground_state[parent])))) + + new_ground_state = ground_state + new_value_spin + new_cl_ground_states.append(new_ground_state) + + cl_ground_states = new_cl_ground_states + + # computing the energy of the first one only, assuming degeneracy + cl_energy = bitstring_energy(hamiltonian, cl_ground_states[0]) + + return cl_energy, cl_ground_states + diff --git a/openqaoa/workflows/optimizer.py b/openqaoa/workflows/optimizer.py index 254cc1091..8b7514860 100644 --- a/openqaoa/workflows/optimizer.py +++ b/openqaoa/workflows/optimizer.py @@ -1010,30 +1010,9 @@ def optimize(self, verbose=False): # Add one step to the counter counter += 1 - # In case eliminations cancel out the whole graph, spin values do not matter - if False: - #if total_elimination: - # Set the values of the spins arbitrarily - #cl_ground_states = "" - #for spin in np.arange(0, len(spin_map.keys())): - #spin_value = np.random.choice([0,1]) # set at random - #cl_ground_states += str(spin_value) - #cl_ground_states += str(0) # set everything to 0 - #cl_ground_states[0] = 1 # set the first one to 1 to respect anticorrelations - #cl_ground_states = [cl_ground_states] - - #generate all permutations, high degeneracy - single_cl_ground_state = ["0" for _ in np.arange(0, len(spin_map.keys()))] - single_cl_ground_state[0] = "1" - single_cl_ground_state = ''.join(single_cl_ground_state) - single_cl_ground_state = str(single_cl_ground_state) - - from itertools import permutations - cl_ground_states = [''.join(p) for p in permutations("1000")] - cl_ground_states = set(cl_ground_states) - - cl_energy = bitstring_energy(problem.hamiltonian, single_cl_ground_state) - + if total_elimination: + # Solve the smallest non-vanishing problem by fixing spins arbitrarily or according to the correlations + cl_energy, cl_ground_states = rqaoa.solution_for_vanishing_instances(problem.hamiltonian, spin_map) else: # Solve the new problem classically cl_energy, cl_ground_states = ground_state_hamiltonian(problem.hamiltonian) diff --git a/tests/test_rqaoa.py b/tests/test_rqaoa.py index 10fb4c426..39b44ec0e 100644 --- a/tests/test_rqaoa.py +++ b/tests/test_rqaoa.py @@ -295,7 +295,7 @@ def test_redefine_problem(self): def test_total_elimination_whole_workflow(self): """ Testing an edge case: solving MaxCut on a specific random unweighted graph leads to vanishing instances before reaching cutoff size. 
- The test recreates the graph instance and MaxCut QUBO, runs standard RQAOA and compare the result to the expected one if the classical solution was obtained for the smallest cutoff for which the problem still exists. + The test recreates the graph instance and MaxCut QUBO, runs standard RQAOA and compare the result to the expected one if the classical solution was obtained by fixing all spins arbitrarily except the correlated ones. """ # Generate the graph g = nx.generators.gnp_random_graph(n=12, p=0.7, seed=58, directed=False) @@ -316,7 +316,7 @@ def test_total_elimination_whole_workflow(self): device = create_device(location='local', name='vectorized') R.set_device(device) - # Set the classical method used to optimiza over QAOA angles and its properties + # Set the classical method used to optimize over QAOA angles and its properties R.set_classical_optimizer(method="cobyla", maxiter=200) # Compile and optimize the problem instance on RQAOA @@ -327,14 +327,15 @@ def test_total_elimination_whole_workflow(self): opt_results = R.results # Compare results to known behaviour: + # note that the problem is highly degenerate and provide only the solutions which obey the correlations identified by the algorithm. For example, for n=4, there are 10 classical strings with the same energy, but only 8 of them have spins 0 and 1 anticorrelated. assert opt_results['solution'] == {'101010100010': -11.0, '010100010101': -11.0, - '101100010101': -11.0, + #'101100010101': -11.0, '101011100010': -11.0, '010101010101': -11.0, '101010101010': -11.0, - '010100011101': -11.0, - '010011101010': -11.0, + '010100011101': -11.0, + #'010011101010': -11.0, '101011101010': -11.0, '010101011101': -11.0} From 5a034b886a432e3ddbd98d06ebaa23a09d85e7c7 Mon Sep 17 00:00:00 2001 From: Kristina Kirova Date: Fri, 13 Jan 2023 09:03:59 +0000 Subject: [PATCH 08/11] implementing some more tests to include other edge cases of vanishing instances --- tests/test_rqaoa.py | 56 +++++++++++++++++++++++++++++++++++---------- 1 file changed, 44 insertions(+), 12 deletions(-) diff --git a/tests/test_rqaoa.py b/tests/test_rqaoa.py index 39b44ec0e..ef6b605f4 100644 --- a/tests/test_rqaoa.py +++ b/tests/test_rqaoa.py @@ -291,14 +291,11 @@ def test_redefine_problem(self): # Test computed Hamiltonian contains the correct terms assert np.allclose(hamiltonian.constant,comp_hamiltonian.constant), f'Constant in the computed Hamiltonian is incorrect' - - def test_total_elimination_whole_workflow(self): - """ - Testing an edge case: solving MaxCut on a specific random unweighted graph leads to vanishing instances before reaching cutoff size. - The test recreates the graph instance and MaxCut QUBO, runs standard RQAOA and compare the result to the expected one if the classical solution was obtained by fixing all spins arbitrarily except the correlated ones. - """ + + + def workflow_mockup(self, graph_seed): # Generate the graph - g = nx.generators.gnp_random_graph(n=12, p=0.7, seed=58, directed=False) + g = nx.generators.gnp_random_graph(n=12, p=0.7, seed=graph_seed, directed=False) # Define the problem and translate it into a binary QUBO. 
maxcut_qubo = MaximumCut(g).get_qubo_problem() @@ -322,13 +319,20 @@ def test_total_elimination_whole_workflow(self): # Compile and optimize the problem instance on RQAOA R.compile(maxcut_qubo) R.optimize() + + return R - # Get results - opt_results = R.results + + def test_total_elimination_whole_workflow(self): + """ + Testing an edge case: solving MaxCut on a specific random unweighted graph leads to vanishing instances before reaching cutoff size. + The test recreates the graph instance and MaxCut QUBO, runs standard RQAOA and compare the result to the expected one if the classical solution was obtained by fixing all spins arbitrarily except the correlated ones. + + Note that the often those problems are highly degenerate and we provide only the solutions which obey the correlations identified by the algorithm. For example, for n=4, there are 10 classical strings with the same energy, but only 8 of them have the corresponding spins anticorrelated. + """ + R_58 = self.workflow_mockup(graph_seed=58) - # Compare results to known behaviour: - # note that the problem is highly degenerate and provide only the solutions which obey the correlations identified by the algorithm. For example, for n=4, there are 10 classical strings with the same energy, but only 8 of them have spins 0 and 1 anticorrelated. - assert opt_results['solution'] == {'101010100010': -11.0, + assert R_58.results['solution'] == {'101010100010': -11.0, '010100010101': -11.0, #'101100010101': -11.0, '101011100010': -11.0, @@ -338,6 +342,34 @@ def test_total_elimination_whole_workflow(self): #'010011101010': -11.0, '101011101010': -11.0, '010101011101': -11.0} + + R_83 = self.workflow_mockup(graph_seed=83) + + assert R_83.results['solution'] == {'000001011101': -10.0, + '010000101111': -10.0, + '101101010000': -10.0, + #'010100101111': -10.0, + '111100100010': -10.0, + '000011011101': -10.0, + #'101011010000': -10.0, + '010010101111': -10.0, + '101111010000': -10.0, + '111110100010': -10.0} + + R_88 = self.workflow_mockup(graph_seed=88) + + assert R_88.results['solution'] == {'001011011000': -12.0, + '101011011000': -12.0, + '001111011000': -12.0, + '101111011000': -12.0, + #'110100100110': -12.0, + #'001011011001': -12.0, + '010000100111': -12.0, + '110000100111': -12.0, + '010100100111': -12.0, + '110100100111': -12.0} + + if __name__ == "__main__": unittest.main() From c54878536359454c174346833f4614fd0f52e124 Mon Sep 17 00:00:00 2001 From: Kristina Kirova Date: Fri, 13 Jan 2023 09:11:07 +0000 Subject: [PATCH 09/11] formatting the file with black --- tests/test_rqaoa.py | 412 ++++++++++++++++++++++++++++++-------------- 1 file changed, 279 insertions(+), 133 deletions(-) diff --git a/tests/test_rqaoa.py b/tests/test_rqaoa.py index ef6b605f4..4f1a6cfae 100644 --- a/tests/test_rqaoa.py +++ b/tests/test_rqaoa.py @@ -26,8 +26,8 @@ Unittest based testing of current implementation of the RQAOA Algorithm """ -class TestingRQAOA(unittest.TestCase): +class TestingRQAOA(unittest.TestCase): def test_find_parent(self): """ Test of the find_parent function which backtracks the spin_map dictionary to obtain @@ -38,34 +38,46 @@ def test_find_parent(self): """ # Spin map example without correct final dependencies - spin_map = dict({(0,(1,0)),(1,(-1,3)),(2,(1,5)),(3,(-1,2)),(4,(1,4)),(5,(1,4))}) + spin_map = dict( + { + (0, (1, 0)), + (1, (-1, 3)), + (2, (1, 5)), + (3, (-1, 2)), + (4, (1, 4)), + (5, (1, 4)), + } + ) # Solution to the problem - parents = {0:0,1:4,2:4,3:4,4:4,5:4} # spin:parent_spin - factors = [1,1,1,-1,1,1] - + parents = 
{0: 0, 1: 4, 2: 4, 3: 4, 4: 4, 5: 4} # spin:parent_spin + factors = [1, 1, 1, -1, 1, 1] comp_parents = {} comp_factors = [] # For each spin compute parent and connecting factor and store them for spin in sorted(spin_map.keys()): - - comp_parent, comp_factor = find_parent(spin_map,spin) - comp_parents.update({spin:comp_parent}) + + comp_parent, comp_factor = find_parent(spin_map, spin) + comp_parents.update({spin: comp_parent}) comp_factors.append(comp_factor) # Test function result - assert np.allclose(list(comp_parents.values()),list(parents.values())), f'Computed parent spins are incorrect' - assert np.allclose(comp_factors,factors), f'Computed constraint factors are incorrect' + assert np.allclose( + list(comp_parents.values()), list(parents.values()) + ), f"Computed parent spins are incorrect" + assert np.allclose( + comp_factors, factors + ), f"Computed constraint factors are incorrect" def test_spin_mapping(self): """ Test of the function that generates the spin_map dictionary containing all the elimination rules. - The test consists in generating the correct spin_map and set of max terms and costs in - its final form (accounting for the dependency that determines the elimination) for a + The test consists in generating the correct spin_map and set of max terms and costs in + its final form (accounting for the dependency that determines the elimination) for a given example. """ @@ -75,40 +87,80 @@ def test_spin_mapping(self): n_qubits = 20 # Terms and weights of the graph - edges = [(i,j) for j in range(n_qubits) for i in range(j)] - weights = [1 for _ in range(len(edges))] + edges = [(i, j) for j in range(n_qubits) for i in range(j)] + weights = [1 for _ in range(len(edges))] # Hyperparameters - problem = QUBO(n= n_qubits, terms=edges, weights=weights) + problem = QUBO(n=n_qubits, terms=edges, weights=weights) ## Testing # Example case of the maximal terms and values extracted from the expectation values - max_terms_and_stats = dict({(4,):0.99,(0, 1): -0.89, (0, 9): 0.77, (1,9):-0.73,\ - (17,): -0.5, (2,4):0.32, (5,4):0.29,(16,):-0.8, (10,):0.81, (16,10):0.4, (19,14):-0.47}) + max_terms_and_stats = dict( + { + (4,): 0.99, + (0, 1): -0.89, + (0, 9): 0.77, + (1, 9): -0.73, + (17,): -0.5, + (2, 4): 0.32, + (5, 4): 0.29, + (16,): -0.8, + (10,): 0.81, + (16, 10): 0.4, + (19, 14): -0.47, + } + ) # Exact solution to the problem - correct_spin_map = dict({(0,(1,0)),(1,(-1,0)),(2,(1,None)),(3,(1,3)),(4,(1,None)),\ - (5,(1,None)),(6,(1,6)),(7,(1,7)),(8,(1,8)),(9,(1,0)),(10,(1,None)),(11,(1,11)),(12,(1,12)),\ - (13,(1,13)),(14,(1,14)),(15,(1,15)),(16,(-1,None)),(17,(-1,None)),(18,(1,18)),(19,(-1,14))}) + correct_spin_map = dict( + { + (0, (1, 0)), + (1, (-1, 0)), + (2, (1, None)), + (3, (1, 3)), + (4, (1, None)), + (5, (1, None)), + (6, (1, 6)), + (7, (1, 7)), + (8, (1, 8)), + (9, (1, 0)), + (10, (1, None)), + (11, (1, 11)), + (12, (1, 12)), + (13, (1, 13)), + (14, (1, 14)), + (15, (1, 15)), + (16, (-1, None)), + (17, (-1, None)), + (18, (1, 18)), + (19, (-1, 14)), + } + ) # Compute the spin_map and final constraints from the function - spin_map = spin_mapping(problem,max_terms_and_stats) + spin_map = spin_mapping(problem, max_terms_and_stats) # Check both outputs contain the same number of keys as the correct solution - assert len(correct_spin_map) == len(spin_map), f'Computed spin_map has incorrect length' - + assert len(correct_spin_map) == len( + spin_map + ), f"Computed spin_map has incorrect length" + # Test the spin_map matches the correct solution for key in correct_spin_map.keys(): - 
assert correct_spin_map[key][0] == spin_map[key][0], f'Computed spin_map contains incorrect factor' - assert correct_spin_map[key][1] == spin_map[key][1], f'Computed spin_map contains incorrect parent spin' + assert ( + correct_spin_map[key][0] == spin_map[key][0] + ), f"Computed spin_map contains incorrect factor" + assert ( + correct_spin_map[key][1] == spin_map[key][1] + ), f"Computed spin_map contains incorrect parent spin" def test_max_terms(self): """ Test of the function that selects spin pairs or singlets are used for elimination, given the specific elimination number. - The test consists in finding the correct pairs and singles to eliminate, according + The test consists in finding the correct pairs and singles to eliminate, according to the adaptive scheme, for a given example. """ @@ -116,33 +168,43 @@ def test_max_terms(self): n_elim = 5 # Set of single spin expectation values - exp_vals_z = np.array([0.33,-0.21,-0.9,0.06,-0.78]) - + exp_vals_z = np.array([0.33, -0.21, -0.9, 0.06, -0.78]) + # Correlation matrix - corr_mat = np.array([[0.0,0.01,-0.64,0.69,0.48],\ - [0.0,0.0,-0.99,-0.27,0.03],\ - [0.0,0.0,0.0,1.0,-0.22],\ - [0.0,0.0,0.0,0.0,0.37],\ - [0.0,0.0,0.0,0.0,0.0]]) - + corr_mat = np.array( + [ + [0.0, 0.01, -0.64, 0.69, 0.48], + [0.0, 0.0, -0.99, -0.27, 0.03], + [0.0, 0.0, 0.0, 1.0, -0.22], + [0.0, 0.0, 0.0, 0.0, 0.37], + [0.0, 0.0, 0.0, 0.0, 0.0], + ] + ) + # Correct solution - max_tc = dict({(2,3):1.0,(1,2):-0.99,(2,):-0.9,(4,):-0.78,(0,3):0.69}) + max_tc = dict( + {(2, 3): 1.0, (1, 2): -0.99, (2,): -0.9, (4,): -0.78, (0, 3): 0.69} + ) # Computed solution using the function comp_max_tc = max_terms(exp_vals_z, corr_mat, n_elim) # Confirm the computed solution has same number of items as correct one - assert len(max_tc) == len(comp_max_tc), f'Computed set of singlets/correlations contains incorrect number of elements' + assert len(max_tc) == len( + comp_max_tc + ), f"Computed set of singlets/correlations contains incorrect number of elements" # Test the function has obtain the correct singlets/pairs with associated values for key in max_tc.keys(): - assert max_tc[key] == comp_max_tc[key], f'Computed set of singlets/correlations contains incorrect values' + assert ( + max_tc[key] == comp_max_tc[key] + ), f"Computed set of singlets/correlations contains incorrect values" def test_ada_max_terms(self): """ Test of the function that adaptively selects spin pairs or singlets are used for elimination. - The test consists in finding the correct pairs and singles to eliminate, according + The test consists in finding the correct pairs and singles to eliminate, according to the adaptive scheme, for a given example. 
""" @@ -150,34 +212,42 @@ def test_ada_max_terms(self): n_max = 3 # Set of single spin expectation values - exp_vals_z = np.array([0.33,-0.21,-0.9,0.06,-0.19]) - + exp_vals_z = np.array([0.33, -0.21, -0.9, 0.06, -0.19]) + # Correlation matrix - corr_mat = np.array([[0.0,0.01,-0.64,0.69,0.48],\ - [0.0,0.0,-0.99,-0.27,0.03],\ - [0.0,0.0,0.0,1.0,-0.22],\ - [0.0,0.0,0.0,0.0,0.37],\ - [0.0,0.0,0.0,0.0,0.0]]) - + corr_mat = np.array( + [ + [0.0, 0.01, -0.64, 0.69, 0.48], + [0.0, 0.0, -0.99, -0.27, 0.03], + [0.0, 0.0, 0.0, 1.0, -0.22], + [0.0, 0.0, 0.0, 0.0, 0.37], + [0.0, 0.0, 0.0, 0.0, 0.0], + ] + ) + # Correct solution - max_tc = dict({(2,3):1.0,(1,2):-0.99,(2,):-0.9}) + max_tc = dict({(2, 3): 1.0, (1, 2): -0.99, (2,): -0.9}) # Computed solution using the function comp_max_tc = ada_max_terms(exp_vals_z, corr_mat, n_max) # Confirm the computed solution has same number of items as correct one - assert len(max_tc) == len(comp_max_tc), f'Computed set of singlets/correlations contains incorrect number of elements' + assert len(max_tc) == len( + comp_max_tc + ), f"Computed set of singlets/correlations contains incorrect number of elements" # Test the function has obtain the correct singlets/pairs with associated values for key in max_tc.keys(): - assert max_tc[key] == comp_max_tc[key], f'Computed set of singlets/correlations contains incorrect values' + assert ( + max_tc[key] == comp_max_tc[key] + ), f"Computed set of singlets/correlations contains incorrect values" - def test_final_solution(self): + def test_final_solution(self): """ Test the function that reconstructs the final solution by backtracking the elimination history and computing the energy of the final states. - The test consists in reconstructing a set of states for a given elimination history + The test consists in reconstructing a set of states for a given elimination history amnd computing their energies. 
""" @@ -187,65 +257,113 @@ def test_final_solution(self): n_qubits = 10 # Terms and weights of the graph - edges = [(i,i+1) for i in range(n_qubits-1)] + [(0,n_qubits-1)] + edges = [(i, i + 1) for i in range(n_qubits - 1)] + [(0, n_qubits - 1)] weights = [1 for _ in range(len(edges))] # Hamiltonian - hamiltonian = Hamiltonian.classical_hamiltonian(edges, weights, constant = 0) + hamiltonian = Hamiltonian.classical_hamiltonian(edges, weights, constant=0) ## Testing # Trial elimination history and ouput of classical solver - max_terms_and_stats_list = [ - [{'pair': (0, 1), 'correlation': -1.0}, {'pair': (0, 9), 'correlation': -1.0}], - [{'pair': (0, 1), 'correlation': 1.0}, {'pair': (0, 7), 'correlation': 1.0}], - [{'pair': (0, 1), 'correlation': -1.0}, {'pair': (0, 5), 'correlation': -1.0}], - [{'pair': (0, 1), 'correlation': 1.0}] - ] - - classical_states = [[0, 1, 0],[1, 0, 1]] + max_terms_and_stats_list = [ + [ + {"pair": (0, 1), "correlation": -1.0}, + {"pair": (0, 9), "correlation": -1.0}, + ], + [ + {"pair": (0, 1), "correlation": 1.0}, + {"pair": (0, 7), "correlation": 1.0}, + ], + [ + {"pair": (0, 1), "correlation": -1.0}, + {"pair": (0, 5), "correlation": -1.0}, + ], + [{"pair": (0, 1), "correlation": 1.0}], + ] + + classical_states = [[0, 1, 0], [1, 0, 1]] # Correct solutions - states = ['0101010101','1010101010'] + states = ["0101010101", "1010101010"] energies = [-10, -10] - correct_full_solution = dict(zip(states,energies)) + correct_full_solution = dict(zip(states, energies)) # Compute solutions - full_solution = final_solution(max_terms_and_stats_list, classical_states, hamiltonian) + full_solution = final_solution( + max_terms_and_stats_list, classical_states, hamiltonian + ) # Test the computed solutions - assert correct_full_solution == full_solution, f'Solution was not computed correctly' - + assert ( + correct_full_solution == full_solution + ), f"Solution was not computed correctly" - def test_problem_from_dict(self): + def test_problem_from_dict(self): """ Test the function that computes a calssical Hamiltonian from a given graph, accounting for approriate labelling of the nodes and edges. - + The test consists in generating the correct QUBO problem for a given graph dictionary. 
""" - + # Trial graph - input_dict = dict({():10,(1,):1,(2,):-1,(6,):4,(1,2):1,(2,5):2,(10,14):3,(6,9):4,(6,14):5,(5,6):6}) - + input_dict = dict( + { + (): 10, + (1,): 1, + (2,): -1, + (6,): 4, + (1, 2): 1, + (2, 5): 2, + (10, 14): 3, + (6, 9): 4, + (6, 14): 5, + (5, 6): 6, + } + ) + # Correct hamiltonian - correct_dict = dict({(0,):1,(1,):-1,(3,):4,(0,1):1,(1,2):2,(2,3):6,(3,4):4,(3,6):5,(5,6):3}) - hamiltonian = Hamiltonian.classical_hamiltonian(list(correct_dict.keys()),list(correct_dict.values()), constant = 10) - hamiltonian_dict = {term.qubit_indices:coeff for term,coeff in zip(hamiltonian.terms,hamiltonian.coeffs)} + correct_dict = dict( + { + (0,): 1, + (1,): -1, + (3,): 4, + (0, 1): 1, + (1, 2): 2, + (2, 3): 6, + (3, 4): 4, + (3, 6): 5, + (5, 6): 3, + } + ) + hamiltonian = Hamiltonian.classical_hamiltonian( + list(correct_dict.keys()), list(correct_dict.values()), constant=10 + ) + hamiltonian_dict = { + term.qubit_indices: coeff + for term, coeff in zip(hamiltonian.terms, hamiltonian.coeffs) + } # Compute hamiltonian from input graph comp_problem = problem_from_dict(input_dict) comp_hamiltonian = comp_problem.hamiltonian - comp_hamiltonian_dict = {term.qubit_indices:coeff for term,coeff in zip(comp_hamiltonian.terms,comp_hamiltonian.coeffs)} - + comp_hamiltonian_dict = { + term.qubit_indices: coeff + for term, coeff in zip(comp_hamiltonian.terms, comp_hamiltonian.coeffs) + } # Test computed Hamiltonian contains the correct terms - assert hamiltonian_dict == comp_hamiltonian_dict, f'Terms and coefficients in the computed Hamiltonian are incorrect' + assert ( + hamiltonian_dict == comp_hamiltonian_dict + ), f"Terms and coefficients in the computed Hamiltonian are incorrect" # Test computed Hamiltonian contains the correct terms - assert np.allclose(hamiltonian.constant,comp_hamiltonian.constant), f'Constant in the computed Hamiltonian is incorrect' + assert np.allclose( + hamiltonian.constant, comp_hamiltonian.constant + ), f"Constant in the computed Hamiltonian is incorrect" - def test_redefine_problem(self): + def test_redefine_problem(self): """ Test the function that computes the new QUBO for a reduced problem, given the original QUBO encoding the problem and a set of elimination rules via the spin_map. 
@@ -259,40 +377,57 @@ def test_redefine_problem(self): n_qubits = 10 # Edges and weights of the graph - input_edges = [(i,i+1) for i in range(n_qubits-1)] + [(0,n_qubits-1)] + input_edges = [(i, i + 1) for i in range(n_qubits - 1)] + [(0, n_qubits - 1)] input_weights = [1 for _ in range(len(input_edges))] # Input problem input_problem = QUBO(n_qubits, input_edges, input_weights) # Input spin map (elimination rules) - spin_map = dict({(0,(1,0)),(1,(-1,0)),(2,(1,2)),(3,(1,3)),(4,(1,2)),\ - (5,(1,2)),(6,(1,6)),(7,(1,7)),(8,(1,8)),(9,(1,0))}) + spin_map = dict( + { + (0, (1, 0)), + (1, (-1, 0)), + (2, (1, 2)), + (3, (1, 3)), + (4, (1, 2)), + (5, (1, 2)), + (6, (1, 6)), + (7, (1, 7)), + (8, (1, 8)), + (9, (1, 0)), + } + ) # Compute new problem comp_problem, _ = redefine_problem(input_problem, spin_map) - + # Compute the new hamiltonian comp_hamiltonian = comp_problem.hamiltonian ## Testing (Comparing the new hamiltonian with the correct one) # Correct edges, weights and hamiltonian for the reduced problem - edges = [(0,1),(1,2),(1,3),(3,4),(4,5),(0,5)] - weights = [-1,2,1,1,1,1] + edges = [(0, 1), (1, 2), (1, 3), (3, 4), (4, 5), (0, 5)] + weights = [-1, 2, 1, 1, 1, 1] - hamiltonian = Hamiltonian.classical_hamiltonian(edges, weights, constant = 0) + hamiltonian = Hamiltonian.classical_hamiltonian(edges, weights, constant=0) # Test computed Hamiltonian contains the correct terms - assert hamiltonian.terms == comp_hamiltonian.terms, f'Terms in the computed Hamiltonian are incorrect' + assert ( + hamiltonian.terms == comp_hamiltonian.terms + ), f"Terms in the computed Hamiltonian are incorrect" # Test computed Hamiltonian contains the correct terms - assert np.allclose(hamiltonian.coeffs,comp_hamiltonian.coeffs), f'Coefficients in the computed Hamiltonian are incorrect' + assert np.allclose( + hamiltonian.coeffs, comp_hamiltonian.coeffs + ), f"Coefficients in the computed Hamiltonian are incorrect" # Test computed Hamiltonian contains the correct terms - assert np.allclose(hamiltonian.constant,comp_hamiltonian.constant), f'Constant in the computed Hamiltonian is incorrect' - - + assert np.allclose( + hamiltonian.constant, comp_hamiltonian.constant + ), f"Constant in the computed Hamiltonian is incorrect" + def workflow_mockup(self, graph_seed): # Generate the graph g = nx.generators.gnp_random_graph(n=12, p=0.7, seed=graph_seed, directed=False) @@ -305,12 +440,20 @@ def workflow_mockup(self, graph_seed): # Set parameters for RQAOA: standard with cut off size 3 qubits R.set_rqaoa_parameters(steps=1, n_cutoff=3) - + # Set more parameters with a very specific starting point - R.set_circuit_properties(p=1, init_type='custom', variational_params_dict={"betas":[0.2732211141792405], "gammas":[1.6017587697695814]}, mixer_hamiltonian='x') + R.set_circuit_properties( + p=1, + init_type="custom", + variational_params_dict={ + "betas": [0.2732211141792405], + "gammas": [1.6017587697695814], + }, + mixer_hamiltonian="x", + ) # Define the device to be vectorized - device = create_device(location='local', name='vectorized') + device = create_device(location="local", name="vectorized") R.set_device(device) # Set the classical method used to optimize over QAOA angles and its properties @@ -319,58 +462,61 @@ def workflow_mockup(self, graph_seed): # Compile and optimize the problem instance on RQAOA R.compile(maxcut_qubo) R.optimize() - + return R - def test_total_elimination_whole_workflow(self): """ Testing an edge case: solving MaxCut on a specific random unweighted graph leads to vanishing instances before 
reaching cutoff size. The test recreates the graph instance and MaxCut QUBO, runs standard RQAOA and compare the result to the expected one if the classical solution was obtained by fixing all spins arbitrarily except the correlated ones. - + Note that the often those problems are highly degenerate and we provide only the solutions which obey the correlations identified by the algorithm. For example, for n=4, there are 10 classical strings with the same energy, but only 8 of them have the corresponding spins anticorrelated. """ R_58 = self.workflow_mockup(graph_seed=58) - assert R_58.results['solution'] == {'101010100010': -11.0, - '010100010101': -11.0, - #'101100010101': -11.0, - '101011100010': -11.0, - '010101010101': -11.0, - '101010101010': -11.0, - '010100011101': -11.0, - #'010011101010': -11.0, - '101011101010': -11.0, - '010101011101': -11.0} - + assert R_58.results["solution"] == { + "101010100010": -11.0, + "010100010101": -11.0, + #'101100010101': -11.0, + "101011100010": -11.0, + "010101010101": -11.0, + "101010101010": -11.0, + "010100011101": -11.0, + #'010011101010': -11.0, + "101011101010": -11.0, + "010101011101": -11.0, + } + R_83 = self.workflow_mockup(graph_seed=83) - assert R_83.results['solution'] == {'000001011101': -10.0, - '010000101111': -10.0, - '101101010000': -10.0, - #'010100101111': -10.0, - '111100100010': -10.0, - '000011011101': -10.0, - #'101011010000': -10.0, - '010010101111': -10.0, - '101111010000': -10.0, - '111110100010': -10.0} - + assert R_83.results["solution"] == { + "000001011101": -10.0, + "010000101111": -10.0, + "101101010000": -10.0, + #'010100101111': -10.0, + "111100100010": -10.0, + "000011011101": -10.0, + #'101011010000': -10.0, + "010010101111": -10.0, + "101111010000": -10.0, + "111110100010": -10.0, + } + R_88 = self.workflow_mockup(graph_seed=88) - assert R_88.results['solution'] == {'001011011000': -12.0, - '101011011000': -12.0, - '001111011000': -12.0, - '101111011000': -12.0, - #'110100100110': -12.0, - #'001011011001': -12.0, - '010000100111': -12.0, - '110000100111': -12.0, - '010100100111': -12.0, - '110100100111': -12.0} - - + assert R_88.results["solution"] == { + "001011011000": -12.0, + "101011011000": -12.0, + "001111011000": -12.0, + "101111011000": -12.0, + #'110100100110': -12.0, + #'001011011001': -12.0, + "010000100111": -12.0, + "110000100111": -12.0, + "010100100111": -12.0, + "110100100111": -12.0, + } + if __name__ == "__main__": - unittest.main() - \ No newline at end of file + unittest.main() From f18a06e5df2aa5b75495efc070c8ca3a4e16b571 Mon Sep 17 00:00:00 2001 From: Kristina Kirova Date: Fri, 13 Jan 2023 09:42:18 +0000 Subject: [PATCH 10/11] adding description of the function --- openqaoa/rqaoa/rqaoa.py | 209 ++++++++++++++++++++++++---------------- 1 file changed, 128 insertions(+), 81 deletions(-) diff --git a/openqaoa/rqaoa/rqaoa.py b/openqaoa/rqaoa/rqaoa.py index 411e05fd1..8cf1498db 100644 --- a/openqaoa/rqaoa/rqaoa.py +++ b/openqaoa/rqaoa/rqaoa.py @@ -19,12 +19,10 @@ from openqaoa.problems.problem import QUBO - - def max_terms(exp_vals_z: np.ndarray, corr_matrix: np.ndarray, n_elim: int): """ - Extracts the n_elim expectation values (single spin and correlation) with - highest magnitude, and uses them to impose the elimination constraint on + Extracts the n_elim expectation values (single spin and correlation) with + highest magnitude, and uses them to impose the elimination constraint on the spins. 
Parameters @@ -38,7 +36,7 @@ def max_terms(exp_vals_z: np.ndarray, corr_matrix: np.ndarray, n_elim: int): Returns ------- - max_terms_and_stats: `dict` + max_terms_and_stats: `dict` Dictionary containing terms to be eliminated and their expectation values. """ # Copy list of single spin expectation values @@ -81,15 +79,15 @@ def max_terms(exp_vals_z: np.ndarray, corr_matrix: np.ndarray, n_elim: int): # Flag if we have have not been able to extract any relation for the terms if max_terms_and_stats == {}: - print(f'All expectation values are 0: Breaking degeneracy by fixing a qubit\n') - max_terms_and_stats = {(0,):-1.0} - + print(f"All expectation values are 0: Breaking degeneracy by fixing a qubit\n") + max_terms_and_stats = {(0,): -1.0} + return max_terms_and_stats def ada_max_terms(exp_vals_z: np.ndarray, corr_matrix: np.ndarray, n_max: int): """ - Extracts the n_max+1 expectation values (single spin and correlation) with + Extracts the n_max+1 expectation values (single spin and correlation) with highest magnitude, computes the average among them and selects the ones above average for elimination. The maximum number of potential candidates is n_max. @@ -148,23 +146,24 @@ def ada_max_terms(exp_vals_z: np.ndarray, corr_matrix: np.ndarray, n_max: int): # Flag if we have have not been able to extract any relation for the terms if max_terms_and_stats == {}: - print(f'All expectation values are 0: Breaking degeneracy by fixing a qubit\n') - max_terms_and_stats = {(0,):-1.0} - + print(f"All expectation values are 0: Breaking degeneracy by fixing a qubit\n") + max_terms_and_stats = {(0,): -1.0} + # Correlation average magnitude - avg_mag_stats = np.round( - np.mean(np.abs(list(max_terms_and_stats.values()))), 10) + avg_mag_stats = np.round(np.mean(np.abs(list(max_terms_and_stats.values()))), 10) # Select only the ones above average - max_terms_and_stats = {key: value for key, value in max_terms_and_stats.items( - ) if np.abs(value) >= avg_mag_stats} + max_terms_and_stats = { + key: value + for key, value in max_terms_and_stats.items() + if np.abs(value) >= avg_mag_stats + } # Cut down the number of eliminations if, due to symmetry, they exceed the number allowed - relevant for unweighted graphs if len(max_terms_and_stats) > n_max: max_keys = list(max_terms_and_stats.keys())[0:n_max] - max_terms_and_stats = { - key: max_terms_and_stats[key] for key in max_keys} + max_terms_and_stats = {key: max_terms_and_stats[key] for key in max_keys} return max_terms_and_stats @@ -208,12 +207,12 @@ def find_parent(spin_map: dict, spin: int, factor: int = 1): def spin_mapping(problem: QUBO, max_terms_and_stats: dict): """ Generates a map between spins in the original problem graph and in the reduced graph. - Elimination constraints from correlations define a constrained spin, to be removed, and a - parent spin, to be kept. Parent spins determine the state of multiple spins by a - chain of dependencies between spins due to the different constraints. Note that there - is always a parent spin and only one. If cycles are present, less edges will - be eliminated to satisfy this requirement. Constraints following from biases result in - fixing spins to a specific value. In this case, the parent spin is set + Elimination constraints from correlations define a constrained spin, to be removed, and a + parent spin, to be kept. Parent spins determine the state of multiple spins by a + chain of dependencies between spins due to the different constraints. Note that there + is always a parent spin and only one. 
If cycles are present, less edges will + be eliminated to satisfy this requirement. Constraints following from biases result in + fixing spins to a specific value. In this case, the parent spin is set to None. Spins in the map that are not eliminated are mapped to themselves. Parameters @@ -242,9 +241,10 @@ def spin_mapping(problem: QUBO, max_terms_and_stats: dict): spin_candidates = set([spin for term in max_terms for spin in term]) # Order term entries in descending magnitude order for correct insertion in solution - sorted_max_ts = sorted(max_terms_and_stats.items(), - key=lambda x: np.abs(x[1]), reverse=True) - + sorted_max_ts = sorted( + max_terms_and_stats.items(), key=lambda x: np.abs(x[1]), reverse=True + ) + # Build spin map from all expectation values for term, stat in sorted_max_ts: @@ -260,7 +260,7 @@ def spin_mapping(problem: QUBO, max_terms_and_stats: dict): # If not, fix it if parent is not None: spin_map.update({spin: (np.sign(stat), None)}) - + # Correlation terms else: @@ -285,8 +285,14 @@ def spin_mapping(problem: QUBO, max_terms_and_stats: dict): # Update the spin map else: - spin_map.update({parent_remove: ( - factor_remove**(-1) * factor_keep * np.sign(stat), parent_keep)}) + spin_map.update( + { + parent_remove: ( + factor_remove ** (-1) * factor_keep * np.sign(stat), + parent_keep, + ) + } + ) # If both spins have been fixed, ignore correlation elif parent_keep is None and parent_remove is None: @@ -294,22 +300,34 @@ def spin_mapping(problem: QUBO, max_terms_and_stats: dict): # If one spin has been fixed, fix the second one according to correlation value else: - + # Extract fixed and unfixed spins - spin_fixed, factor_fixed = (parent_keep, factor_keep) if parent_keep is None else ( - parent_remove, factor_remove) + spin_fixed, factor_fixed = ( + (parent_keep, factor_keep) + if parent_keep is None + else (parent_remove, factor_remove) + ) spin_unfixed, factor_unfixed = ( - parent_remove, factor_remove) if spin_fixed == parent_keep else (parent_keep, factor_fixed) + (parent_remove, factor_remove) + if spin_fixed == parent_keep + else (parent_keep, factor_fixed) + ) # Fix spin - spin_map.update({spin_unfixed: ( - factor_unfixed**(-1) * factor_fixed * np.sign(stat), spin_fixed)}) + spin_map.update( + { + spin_unfixed: ( + factor_unfixed ** (-1) * factor_fixed * np.sign(stat), + spin_fixed, + ) + } + ) # Correct all dependencies for spin in spin_candidates: parent_spin, cumulative_factor = find_parent(spin_map, spin) spin_map.update({spin: (cumulative_factor, parent_spin)}) - + return spin_map @@ -318,8 +336,8 @@ def problem_from_dict(problem_dict: dict): Transforms a QUBO problem, input as a dictionary, into a QUBO problem, output as a QUBO object, ensuring proper labelling of the nodes. For example, for a set of nodes [0,1,4,6] with edges [(0,1),(1,4),(4,6),(0,6)], after the relabelling, the - Hamiltonian object will be constructed with node labels [0,1,2,3] and edges - [(0,1),(1,2),(2,3),(1,3)]. + Hamiltonian object will be constructed with node labels [0,1,2,3] and edges + [(0,1),(1,2),(2,3),(1,3)]. Parameters ---------- @@ -329,7 +347,7 @@ def problem_from_dict(problem_dict: dict): Returns ------- problem: `QUBO` - A QUBO problem object constructed using the classical_hamiltonian() method. + A QUBO problem object constructed using the classical_hamiltonian() method. 
""" edges = list(problem_dict.keys()) @@ -354,7 +372,8 @@ def problem_from_dict(problem_dict: dict): # Map quadratic term to quadratic term elif len(edge) == 2: label_edges_mapping.update( - {edge: (label_mapping.get(edge[0]), label_mapping.get(edge[1]))}) + {edge: (label_mapping.get(edge[0]), label_mapping.get(edge[1]))} + ) # If constant term, just map to itself else: @@ -362,9 +381,9 @@ def problem_from_dict(problem_dict: dict): # New edges new_edges = list(label_edges_mapping.values()) - + # New hamiltonian - problem = QUBO(n= len(register), terms=new_edges, weights=weights) + problem = QUBO(n=len(register), terms=new_edges, weights=weights) return problem @@ -388,7 +407,7 @@ def redefine_problem(problem: QUBO, spin_map: dict): spin_map: `dict` Updated spin_map with sponatenous eliminations from cancellations during spin removal process. """ - + # Define new QUBO problem as a dictionary new_problem_dict = {} @@ -411,7 +430,7 @@ def redefine_problem(problem: QUBO, spin_map: dict): # If unfixed, define new edge and weight else: new_edge = (parent_spin,) - new_weight = factor_spin*weight + new_weight = factor_spin * weight # Add new edge if not already present in the dictionary if new_problem_dict.get(new_edge) is None: @@ -443,7 +462,7 @@ def redefine_problem(problem: QUBO, spin_map: dict): new_edge = tuple([min(new_edge), max(new_edge)]) # Define new weight from factors in the spin map - new_weight = factor_spin1*factor_spin2*weight + new_weight = factor_spin1 * factor_spin2 * weight # Add new edge if not already present in the dictionary if new_problem_dict.get(new_edge) is None: @@ -457,9 +476,8 @@ def redefine_problem(problem: QUBO, spin_map: dict): else: # Define new bias term keeping the unfixed spin - new_edge = (parent_spin1,) if parent_spin2 is None else ( - parent_spin2,) - new_weight = factor_spin1*factor_spin2*weight + new_edge = (parent_spin1,) if parent_spin2 is None else (parent_spin2,) + new_weight = factor_spin1 * factor_spin2 * weight # Add new edge if not already present in the dictionary if new_problem_dict.get(new_edge) is None: @@ -471,47 +489,55 @@ def redefine_problem(problem: QUBO, spin_map: dict): # New qubit register new_register = set([spin for term in new_problem_dict.keys() for spin in term]) - + # Remove vanishing edges - new_problem_dict = {edge:weight for edge,weight in new_problem_dict.items() if round(weight,10) != 0} + new_problem_dict = { + edge: weight + for edge, weight in new_problem_dict.items() + if round(weight, 10) != 0 + } # Define quadratic register after removing vanishing terms - new_quadratic_register = set([spin for edge in new_problem_dict.keys() if len(edge) == 2 for spin in edge]) + new_quadratic_register = set( + [spin for edge in new_problem_dict.keys() if len(edge) == 2 for spin in edge] + ) # If lengths do not match, there are isolated nodes if len(new_register) != len(new_quadratic_register): isolated_nodes = new_register.difference(new_quadratic_register) - + # Fix isolated nodes for node in isolated_nodes: singlet = (node,) # If no linear term acting on the node, fix arbitrarily if new_problem_dict.get(singlet) is None: - spin_map.update({node:(1,None)}) + spin_map.update({node: (1, None)}) # If linear term present, fix accordingly by anti-aligning else: factor = -np.sign(new_problem_dict.get(singlet)) - spin_map.update({node:(factor,None)}) + spin_map.update({node: (factor, None)}) # Delete isolated node from new problem new_problem_dict.pop((node,)) # For some unweighted graphs specific eliminations can lead to eliminating the 
whole instance before reaching cutoff. if new_problem_dict == {}: - new_problem = problem # set the problem to the old problem and solve classically for the smallest non-vanishing instance. - + new_problem = problem # set the problem to the old problem and solve classically for the smallest non-vanishing instance. + else: # Redefine new QUBO problem from the dictionary new_problem = problem_from_dict(new_problem_dict) - + return new_problem, spin_map -def final_solution(elimination_tracker: list, cl_states: list, hamiltonian: Hamiltonian): +def final_solution( + elimination_tracker: list, cl_states: list, hamiltonian: Hamiltonian +): """ - Constructs the final solution to the problem by obtaining the final states from adding the removed + Constructs the final solution to the problem by obtaining the final states from adding the removed spins into the classical results and computing the corresponding energy. Parameters @@ -520,11 +546,11 @@ def final_solution(elimination_tracker: list, cl_states: list, hamiltonian: Hami List of dictionaries, where each dictionary contains the elimination rules applied at each step of the process. Dictionary keys correspond to spin pairs (i,j), always with i 0 else state.insert(j, prev_corr ^ 1) + state.insert(j, prev_corr) if val > 0 else state.insert( + j, prev_corr ^ 1 + ) # Store solution states and their energy - full_solution.update({"".join(str(i) for i in state):bitstring_energy(hamiltonian, state)}) + full_solution.update( + {"".join(str(i) for i in state): bitstring_energy(hamiltonian, state)} + ) return full_solution def solution_for_vanishing_instances(hamiltonian: Hamiltonian, spin_map: dict): - cl_ground_states = [""] - + """ + Constructs the final solution of the smallest non vanishing problem by obtaining the final states by generating all permutations of the spins which can be fixe arbitrarily while obeying the correlations identified by the last run of QAOA before the problem vanished. + Computing the corresponding energy only of the first string, assuming they are degenerate. + + Parameters + ---------- + spin_map: `dict` + Spin map containing the correlations and eliminations of the smallest non vanishing problem statement. + hamiltonian: `Hamiltonian` + Hamiltonian object containing the smallest non vanishing problem statement. + + Returns + ------- + cl_energy: `float` + The energy of the first solution wrt the cost Hamiltonian. + + cl_ground_states: `list` + List of strings of binary values representing the classical solution of the problem respecting the spin map. 
+ """ + cl_ground_states = [""] + for spin in spin_map.keys(): new_cl_ground_states = [] - + if spin_map[spin][1] == None: # add 0 or 1 arbitrarily - + for ground_state in cl_ground_states: first_new_ground_state = ground_state + "0" second_new_ground_state = ground_state + "1" - + new_cl_ground_states.append(first_new_ground_state) new_cl_ground_states.append(second_new_ground_state) - + cl_ground_states = new_cl_ground_states - + else: # fix according to correlation factor factor = spin_map[spin][0] parent = spin_map[spin][1] - + for ground_state in cl_ground_states: if factor == 1.0: # correlated @@ -611,14 +659,13 @@ def solution_for_vanishing_instances(hamiltonian: Hamiltonian, spin_map: dict): else: # anticorrelated new_value_spin = str(int(not bool(int(ground_state[parent])))) - + new_ground_state = ground_state + new_value_spin new_cl_ground_states.append(new_ground_state) - + cl_ground_states = new_cl_ground_states # computing the energy of the first one only, assuming degeneracy - cl_energy = bitstring_energy(hamiltonian, cl_ground_states[0]) + cl_energy = bitstring_energy(hamiltonian, cl_ground_states[0]) return cl_energy, cl_ground_states - From 7e50e7c4c4914182078df7555c4f48a8c52671e9 Mon Sep 17 00:00:00 2001 From: vishal-ph Date: Wed, 18 Jan 2023 13:52:55 +0800 Subject: [PATCH 11/11] remove extra empty line --- openqaoa/rqaoa/rqaoa.py | 1 - 1 file changed, 1 deletion(-) diff --git a/openqaoa/rqaoa/rqaoa.py b/openqaoa/rqaoa/rqaoa.py index 4397cf097..3a7c6be13 100644 --- a/openqaoa/rqaoa/rqaoa.py +++ b/openqaoa/rqaoa/rqaoa.py @@ -640,7 +640,6 @@ def solution_for_vanishing_instances(hamiltonian: Hamiltonian, spin_map: dict): ------- cl_energy: `float` The energy of the first solution wrt the cost Hamiltonian. - cl_ground_states: `list` List of strings of binary values representing the classical solution of the problem respecting the spin map. """